Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--src/cmd/compile/internal/amd64/galign.go26
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go137
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go1334
-rw-r--r--src/cmd/compile/internal/arm/galign.go26
-rw-r--r--src/cmd/compile/internal/arm/ggen.go58
-rw-r--r--src/cmd/compile/internal/arm/ssa.go981
-rw-r--r--src/cmd/compile/internal/arm64/galign.go26
-rw-r--r--src/cmd/compile/internal/arm64/ggen.go74
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go1251
-rw-r--r--src/cmd/compile/internal/gc/alg.go959
-rw-r--r--src/cmd/compile/internal/gc/algkind_string.go48
-rw-r--r--src/cmd/compile/internal/gc/align.go531
-rw-r--r--src/cmd/compile/internal/gc/bench_test.go64
-rw-r--r--src/cmd/compile/internal/gc/bexport.go177
-rw-r--r--src/cmd/compile/internal/gc/bimport.go24
-rw-r--r--src/cmd/compile/internal/gc/bitset.go59
-rw-r--r--src/cmd/compile/internal/gc/bootstrap.go13
-rw-r--r--src/cmd/compile/internal/gc/builtin.go340
-rw-r--r--src/cmd/compile/internal/gc/builtin/runtime.go259
-rw-r--r--src/cmd/compile/internal/gc/builtin_test.go32
-rw-r--r--src/cmd/compile/internal/gc/bv.go278
-rw-r--r--src/cmd/compile/internal/gc/class_string.go29
-rw-r--r--src/cmd/compile/internal/gc/closure.go594
-rw-r--r--src/cmd/compile/internal/gc/const.go1323
-rw-r--r--src/cmd/compile/internal/gc/constFold_test.go18111
-rw-r--r--src/cmd/compile/internal/gc/dcl.go1185
-rw-r--r--src/cmd/compile/internal/gc/dep_test.go25
-rw-r--r--src/cmd/compile/internal/gc/dump.go280
-rw-r--r--src/cmd/compile/internal/gc/dwinl.go450
-rw-r--r--src/cmd/compile/internal/gc/embed.go256
-rw-r--r--src/cmd/compile/internal/gc/esc.go472
-rw-r--r--src/cmd/compile/internal/gc/escape.go1539
-rw-r--r--src/cmd/compile/internal/gc/export.go233
-rw-r--r--src/cmd/compile/internal/gc/fixedbugs_test.go92
-rw-r--r--src/cmd/compile/internal/gc/float_test.go544
-rw-r--r--src/cmd/compile/internal/gc/fmt.go1986
-rw-r--r--src/cmd/compile/internal/gc/gen.go86
-rw-r--r--src/cmd/compile/internal/gc/global_test.go116
-rw-r--r--src/cmd/compile/internal/gc/go.go349
-rw-r--r--src/cmd/compile/internal/gc/gsubr.go333
-rw-r--r--src/cmd/compile/internal/gc/iexport.go1515
-rw-r--r--src/cmd/compile/internal/gc/iface_test.go128
-rw-r--r--src/cmd/compile/internal/gc/iimport.go1117
-rw-r--r--src/cmd/compile/internal/gc/init.go109
-rw-r--r--src/cmd/compile/internal/gc/initorder.go358
-rw-r--r--src/cmd/compile/internal/gc/inl.go1507
-rw-r--r--src/cmd/compile/internal/gc/inl_test.go269
-rw-r--r--src/cmd/compile/internal/gc/lang_test.go64
-rw-r--r--src/cmd/compile/internal/gc/lex.go224
-rw-r--r--src/cmd/compile/internal/gc/lex_test.go121
-rw-r--r--src/cmd/compile/internal/gc/logic_test.go289
-rw-r--r--src/cmd/compile/internal/gc/main.go1610
-rw-r--r--src/cmd/compile/internal/gc/mapfile_mmap.go48
-rw-r--r--src/cmd/compile/internal/gc/mapfile_read.go21
-rw-r--r--src/cmd/compile/internal/gc/mkbuiltin.go225
-rw-r--r--src/cmd/compile/internal/gc/mpfloat.go357
-rw-r--r--src/cmd/compile/internal/gc/mpint.go304
-rw-r--r--src/cmd/compile/internal/gc/noder.go1756
-rw-r--r--src/cmd/compile/internal/gc/obj.go639
-rw-r--r--src/cmd/compile/internal/gc/op_string.go175
-rw-r--r--src/cmd/compile/internal/gc/order.go1441
-rw-r--r--src/cmd/compile/internal/gc/pgen.go798
-rw-r--r--src/cmd/compile/internal/gc/pgen_test.go196
-rw-r--r--src/cmd/compile/internal/gc/phi.go538
-rw-r--r--src/cmd/compile/internal/gc/plive.go1321
-rw-r--r--src/cmd/compile/internal/gc/pprof.go13
-rw-r--r--src/cmd/compile/internal/gc/racewalk.go93
-rw-r--r--src/cmd/compile/internal/gc/range.go628
-rw-r--r--src/cmd/compile/internal/gc/reflect.go1901
-rw-r--r--src/cmd/compile/internal/gc/reproduciblebuilds_test.go112
-rw-r--r--src/cmd/compile/internal/gc/scc.go140
-rw-r--r--src/cmd/compile/internal/gc/scope.go109
-rw-r--r--src/cmd/compile/internal/gc/scope_test.go538
-rw-r--r--src/cmd/compile/internal/gc/select.go387
-rw-r--r--src/cmd/compile/internal/gc/shift_test.go1031
-rw-r--r--src/cmd/compile/internal/gc/sinit.go1172
-rw-r--r--src/cmd/compile/internal/gc/sizeof_test.go39
-rw-r--r--src/cmd/compile/internal/gc/ssa.go7231
-rw-r--r--src/cmd/compile/internal/gc/ssa_test.go191
-rw-r--r--src/cmd/compile/internal/gc/subr.go1918
-rw-r--r--src/cmd/compile/internal/gc/swt.go756
-rw-r--r--src/cmd/compile/internal/gc/syntax.go1196
-rw-r--r--src/cmd/compile/internal/gc/testdata/addressed_test.go210
-rw-r--r--src/cmd/compile/internal/gc/testdata/append_test.go61
-rw-r--r--src/cmd/compile/internal/gc/testdata/arithBoundary_test.go694
-rw-r--r--src/cmd/compile/internal/gc/testdata/arithConst_test.go9570
-rw-r--r--src/cmd/compile/internal/gc/testdata/arith_test.go1454
-rw-r--r--src/cmd/compile/internal/gc/testdata/array_test.go132
-rw-r--r--src/cmd/compile/internal/gc/testdata/assert_test.go128
-rw-r--r--src/cmd/compile/internal/gc/testdata/break_test.go250
-rw-r--r--src/cmd/compile/internal/gc/testdata/chan_test.go63
-rw-r--r--src/cmd/compile/internal/gc/testdata/closure_test.go32
-rw-r--r--src/cmd/compile/internal/gc/testdata/cmpConst_test.go2209
-rw-r--r--src/cmd/compile/internal/gc/testdata/cmp_test.go37
-rw-r--r--src/cmd/compile/internal/gc/testdata/compound_test.go128
-rw-r--r--src/cmd/compile/internal/gc/testdata/copy_test.go760
-rw-r--r--src/cmd/compile/internal/gc/testdata/ctl_test.go149
-rw-r--r--src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go21
-rw-r--r--src/cmd/compile/internal/gc/testdata/divbyzero_test.go48
-rw-r--r--src/cmd/compile/internal/gc/testdata/dupLoad_test.go83
-rw-r--r--src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go315
-rw-r--r--src/cmd/compile/internal/gc/testdata/fp_test.go1773
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go209
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go346
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go247
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go307
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/copyGen.go121
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/zeroGen.go143
-rw-r--r--src/cmd/compile/internal/gc/testdata/loadstore_test.go204
-rw-r--r--src/cmd/compile/internal/gc/testdata/map_test.go37
-rw-r--r--src/cmd/compile/internal/gc/testdata/namedReturn_test.go93
-rw-r--r--src/cmd/compile/internal/gc/testdata/phi_test.go99
-rw-r--r--src/cmd/compile/internal/gc/testdata/regalloc_test.go50
-rw-r--r--src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go34
-rw-r--r--src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go15
-rw-r--r--src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go17
-rw-r--r--src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go70
-rw-r--r--src/cmd/compile/internal/gc/testdata/short_test.go57
-rw-r--r--src/cmd/compile/internal/gc/testdata/slice_test.go46
-rw-r--r--src/cmd/compile/internal/gc/testdata/sqrtConst_test.go50
-rw-r--r--src/cmd/compile/internal/gc/testdata/string_test.go207
-rw-r--r--src/cmd/compile/internal/gc/testdata/unsafe_test.go145
-rw-r--r--src/cmd/compile/internal/gc/testdata/zero_test.go711
-rw-r--r--src/cmd/compile/internal/gc/timings.go235
-rw-r--r--src/cmd/compile/internal/gc/trace.go27
-rw-r--r--src/cmd/compile/internal/gc/truncconst_test.go63
-rw-r--r--src/cmd/compile/internal/gc/typecheck.go4019
-rw-r--r--src/cmd/compile/internal/gc/types.go58
-rw-r--r--src/cmd/compile/internal/gc/types_acc.go16
-rw-r--r--src/cmd/compile/internal/gc/universe.go453
-rw-r--r--src/cmd/compile/internal/gc/unsafe.go76
-rw-r--r--src/cmd/compile/internal/gc/util.go103
-rw-r--r--src/cmd/compile/internal/gc/walk.go4125
-rw-r--r--src/cmd/compile/internal/gc/zerorange_test.go98
-rw-r--r--src/cmd/compile/internal/logopt/escape.go13
-rw-r--r--src/cmd/compile/internal/logopt/escape_bootstrap.go12
-rw-r--r--src/cmd/compile/internal/logopt/log_opts.go523
-rw-r--r--src/cmd/compile/internal/logopt/logopt_test.go258
-rw-r--r--src/cmd/compile/internal/mips/galign.go28
-rw-r--r--src/cmd/compile/internal/mips/ggen.go53
-rw-r--r--src/cmd/compile/internal/mips/ssa.go885
-rw-r--r--src/cmd/compile/internal/mips64/galign.go29
-rw-r--r--src/cmd/compile/internal/mips64/ggen.go57
-rw-r--r--src/cmd/compile/internal/mips64/ssa.go846
-rw-r--r--src/cmd/compile/internal/ppc64/galign.go28
-rw-r--r--src/cmd/compile/internal/ppc64/ggen.go79
-rw-r--r--src/cmd/compile/internal/ppc64/opt.go12
-rw-r--r--src/cmd/compile/internal/ppc64/ssa.go1967
-rw-r--r--src/cmd/compile/internal/riscv64/galign.go25
-rw-r--r--src/cmd/compile/internal/riscv64/ggen.go56
-rw-r--r--src/cmd/compile/internal/riscv64/gsubr.go20
-rw-r--r--src/cmd/compile/internal/riscv64/ssa.go720
-rw-r--r--src/cmd/compile/internal/s390x/galign.go24
-rw-r--r--src/cmd/compile/internal/s390x/ggen.go88
-rw-r--r--src/cmd/compile/internal/s390x/ssa.go988
-rw-r--r--src/cmd/compile/internal/ssa/README.md209
-rw-r--r--src/cmd/compile/internal/ssa/TODO24
-rw-r--r--src/cmd/compile/internal/ssa/addressingmodes.go460
-rw-r--r--src/cmd/compile/internal/ssa/biasedsparsemap.go112
-rw-r--r--src/cmd/compile/internal/ssa/block.go371
-rw-r--r--src/cmd/compile/internal/ssa/branchelim.go449
-rw-r--r--src/cmd/compile/internal/ssa/branchelim_test.go172
-rw-r--r--src/cmd/compile/internal/ssa/cache.go81
-rw-r--r--src/cmd/compile/internal/ssa/check.go597
-rw-r--r--src/cmd/compile/internal/ssa/checkbce.go35
-rw-r--r--src/cmd/compile/internal/ssa/compile.go573
-rw-r--r--src/cmd/compile/internal/ssa/config.go390
-rw-r--r--src/cmd/compile/internal/ssa/copyelim.go84
-rw-r--r--src/cmd/compile/internal/ssa/copyelim_test.go41
-rw-r--r--src/cmd/compile/internal/ssa/critical.go116
-rw-r--r--src/cmd/compile/internal/ssa/cse.go373
-rw-r--r--src/cmd/compile/internal/ssa/cse_test.go129
-rw-r--r--src/cmd/compile/internal/ssa/deadcode.go393
-rw-r--r--src/cmd/compile/internal/ssa/deadcode_test.go161
-rw-r--r--src/cmd/compile/internal/ssa/deadstore.go348
-rw-r--r--src/cmd/compile/internal/ssa/deadstore_test.go129
-rw-r--r--src/cmd/compile/internal/ssa/debug.go1187
-rw-r--r--src/cmd/compile/internal/ssa/debug_test.go1020
-rw-r--r--src/cmd/compile/internal/ssa/decompose.go449
-rw-r--r--src/cmd/compile/internal/ssa/dom.go302
-rw-r--r--src/cmd/compile/internal/ssa/dom_test.go608
-rw-r--r--src/cmd/compile/internal/ssa/expand_calls.go975
-rw-r--r--src/cmd/compile/internal/ssa/export_test.go209
-rw-r--r--src/cmd/compile/internal/ssa/flagalloc.go269
-rw-r--r--src/cmd/compile/internal/ssa/flags_amd64_test.s31
-rw-r--r--src/cmd/compile/internal/ssa/flags_arm64_test.s32
-rw-r--r--src/cmd/compile/internal/ssa/flags_test.go108
-rw-r--r--src/cmd/compile/internal/ssa/func.go799
-rw-r--r--src/cmd/compile/internal/ssa/func_test.go484
-rw-r--r--src/cmd/compile/internal/ssa/fuse.go243
-rw-r--r--src/cmd/compile/internal/ssa/fuse_comparisons.go157
-rw-r--r--src/cmd/compile/internal/ssa/fuse_test.go203
-rw-r--r--src/cmd/compile/internal/ssa/gen/386.rules1111
-rw-r--r--src/cmd/compile/internal/ssa/gen/386Ops.go585
-rw-r--r--src/cmd/compile/internal/ssa/gen/386splitload.rules11
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules2216
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64Ops.go946
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64splitload.rules45
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM.rules1475
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64.rules2789
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64Ops.go762
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARMOps.go600
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS.rules697
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS64.rules678
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS64Ops.go482
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPSOps.go439
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64.rules1461
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64Ops.go717
-rw-r--r--src/cmd/compile/internal/ssa/gen/README7
-rw-r--r--src/cmd/compile/internal/ssa/gen/RISCV64.rules737
-rw-r--r--src/cmd/compile/internal/ssa/gen/RISCV64Ops.go464
-rw-r--r--src/cmd/compile/internal/ssa/gen/S390X.rules1695
-rw-r--r--src/cmd/compile/internal/ssa/gen/S390XOps.go816
-rw-r--r--src/cmd/compile/internal/ssa/gen/Wasm.rules408
-rw-r--r--src/cmd/compile/internal/ssa/gen/WasmOps.go278
-rwxr-xr-xsrc/cmd/compile/internal/ssa/gen/cover.bash26
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec.rules92
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec64.rules396
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec64Ops.go20
-rw-r--r--src/cmd/compile/internal/ssa/gen/decArgs.rules58
-rw-r--r--src/cmd/compile/internal/ssa/gen/decArgsOps.go20
-rw-r--r--src/cmd/compile/internal/ssa/gen/decOps.go20
-rw-r--r--src/cmd/compile/internal/ssa/gen/generic.rules2535
-rw-r--r--src/cmd/compile/internal/ssa/gen/genericOps.go620
-rw-r--r--src/cmd/compile/internal/ssa/gen/main.go541
-rw-r--r--src/cmd/compile/internal/ssa/gen/rulegen.go1856
-rw-r--r--src/cmd/compile/internal/ssa/html.go1319
-rw-r--r--src/cmd/compile/internal/ssa/id.go28
-rw-r--r--src/cmd/compile/internal/ssa/layout.go180
-rw-r--r--src/cmd/compile/internal/ssa/lca.go123
-rw-r--r--src/cmd/compile/internal/ssa/lca_test.go88
-rw-r--r--src/cmd/compile/internal/ssa/likelyadjust.go575
-rw-r--r--src/cmd/compile/internal/ssa/location.go88
-rw-r--r--src/cmd/compile/internal/ssa/loopbce.go346
-rw-r--r--src/cmd/compile/internal/ssa/loopreschedchecks.go499
-rw-r--r--src/cmd/compile/internal/ssa/looprotate.go106
-rw-r--r--src/cmd/compile/internal/ssa/lower.go39
-rw-r--r--src/cmd/compile/internal/ssa/magic.go424
-rw-r--r--src/cmd/compile/internal/ssa/magic_test.go410
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck.go336
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck_test.go434
-rw-r--r--src/cmd/compile/internal/ssa/numberlines.go271
-rw-r--r--src/cmd/compile/internal/ssa/op.go405
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go36677
-rw-r--r--src/cmd/compile/internal/ssa/opt.go10
-rw-r--r--src/cmd/compile/internal/ssa/passbm_test.go101
-rw-r--r--src/cmd/compile/internal/ssa/phielim.go69
-rw-r--r--src/cmd/compile/internal/ssa/phiopt.go176
-rw-r--r--src/cmd/compile/internal/ssa/poset.go1359
-rw-r--r--src/cmd/compile/internal/ssa/poset_test.go800
-rw-r--r--src/cmd/compile/internal/ssa/print.go159
-rw-r--r--src/cmd/compile/internal/ssa/prove.go1426
-rw-r--r--src/cmd/compile/internal/ssa/redblack32.go429
-rw-r--r--src/cmd/compile/internal/ssa/redblack32_test.go274
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go2696
-rw-r--r--src/cmd/compile/internal/ssa/regalloc_test.go230
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go1892
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386.go12575
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386splitload.go162
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go35989
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64splitload.go853
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM.go22017
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go28662
-rw-r--r--src/cmd/compile/internal/ssa/rewriteCond_test.go597
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS.go7535
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS64.go8040
-rw-r--r--src/cmd/compile/internal/ssa/rewritePPC64.go18258
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64.go6604
-rw-r--r--src/cmd/compile/internal/ssa/rewriteS390X.go17859
-rw-r--r--src/cmd/compile/internal/ssa/rewriteWasm.go4905
-rw-r--r--src/cmd/compile/internal/ssa/rewrite_test.go220
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec.go415
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec64.go2464
-rw-r--r--src/cmd/compile/internal/ssa/rewritedecArgs.go247
-rw-r--r--src/cmd/compile/internal/ssa/rewritegeneric.go25091
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go503
-rw-r--r--src/cmd/compile/internal/ssa/schedule_test.go101
-rw-r--r--src/cmd/compile/internal/ssa/shift_test.go107
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit.go510
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit_test.go53
-rw-r--r--src/cmd/compile/internal/ssa/sizeof_test.go39
-rw-r--r--src/cmd/compile/internal/ssa/softfloat.go79
-rw-r--r--src/cmd/compile/internal/ssa/sparsemap.go93
-rw-r--r--src/cmd/compile/internal/ssa/sparseset.go79
-rw-r--r--src/cmd/compile/internal/ssa/sparsetree.go235
-rw-r--r--src/cmd/compile/internal/ssa/sparsetreemap.go189
-rw-r--r--src/cmd/compile/internal/ssa/stackalloc.go420
-rw-r--r--src/cmd/compile/internal/ssa/stackframe.go10
-rw-r--r--src/cmd/compile/internal/ssa/stmtlines_test.go132
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts99
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts94
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts123
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts143
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.go106
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.go51
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.go27
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts12
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts4
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.go16
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts56
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts46
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts64
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts55
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.go107
-rw-r--r--src/cmd/compile/internal/ssa/tighten.go164
-rw-r--r--src/cmd/compile/internal/ssa/trim.go172
-rw-r--r--src/cmd/compile/internal/ssa/tuple.go59
-rw-r--r--src/cmd/compile/internal/ssa/value.go494
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier.go616
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier_test.go56
-rw-r--r--src/cmd/compile/internal/ssa/xposmap.go116
-rw-r--r--src/cmd/compile/internal/ssa/zcse.go79
-rw-r--r--src/cmd/compile/internal/ssa/zeroextension_test.go34
-rw-r--r--src/cmd/compile/internal/syntax/branches.go311
-rw-r--r--src/cmd/compile/internal/syntax/dumper.go212
-rw-r--r--src/cmd/compile/internal/syntax/dumper_test.go25
-rw-r--r--src/cmd/compile/internal/syntax/error_test.go191
-rw-r--r--src/cmd/compile/internal/syntax/nodes.go469
-rw-r--r--src/cmd/compile/internal/syntax/nodes_test.go329
-rw-r--r--src/cmd/compile/internal/syntax/operator_string.go17
-rw-r--r--src/cmd/compile/internal/syntax/parser.go2322
-rw-r--r--src/cmd/compile/internal/syntax/parser_test.go349
-rw-r--r--src/cmd/compile/internal/syntax/pos.go156
-rw-r--r--src/cmd/compile/internal/syntax/printer.go938
-rw-r--r--src/cmd/compile/internal/syntax/printer_test.go55
-rw-r--r--src/cmd/compile/internal/syntax/scanner.go876
-rw-r--r--src/cmd/compile/internal/syntax/scanner_test.go764
-rw-r--r--src/cmd/compile/internal/syntax/source.go218
-rw-r--r--src/cmd/compile/internal/syntax/syntax.go95
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue20789.src9
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23385.src17
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23434.src31
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue31092.src16
-rw-r--r--src/cmd/compile/internal/syntax/testdata/sample.src33
-rw-r--r--src/cmd/compile/internal/syntax/token_string.go17
-rw-r--r--src/cmd/compile/internal/syntax/tokens.go156
-rw-r--r--src/cmd/compile/internal/test/README4
-rw-r--r--src/cmd/compile/internal/test/divconst_test.go325
-rw-r--r--src/cmd/compile/internal/test/mulconst_test.go242
-rw-r--r--src/cmd/compile/internal/test/test.go1
-rw-r--r--src/cmd/compile/internal/types/etype_string.go60
-rw-r--r--src/cmd/compile/internal/types/identity.go126
-rw-r--r--src/cmd/compile/internal/types/pkg.go146
-rw-r--r--src/cmd/compile/internal/types/scope.go103
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go48
-rw-r--r--src/cmd/compile/internal/types/sym.go142
-rw-r--r--src/cmd/compile/internal/types/sym_test.go59
-rw-r--r--src/cmd/compile/internal/types/type.go1525
-rw-r--r--src/cmd/compile/internal/types/type_test.go28
-rw-r--r--src/cmd/compile/internal/types/utils.go73
-rw-r--r--src/cmd/compile/internal/wasm/ssa.go505
-rw-r--r--src/cmd/compile/internal/x86/galign.go39
-rw-r--r--src/cmd/compile/internal/x86/ggen.go48
-rw-r--r--src/cmd/compile/internal/x86/ssa.go960
358 files changed, 404401 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
new file mode 100644
index 0000000..af58440
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/x86"
+)
+
+var leaptr = x86.ALEAQ
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &x86.Linkamd64
+ arch.REGSP = x86.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
new file mode 100644
index 0000000..0c1456f
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/objabi"
+)
+
+// no floating point in note handlers on Plan 9
+var isPlan9 = objabi.GOOS == "plan9"
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ.
+// See runtime/mkduff.go.
+const (
+ dzBlocks = 16 // number of MOV/ADD blocks
+ dzBlockLen = 4 // number of clears per block
+ dzBlockSize = 19 // size of instructions in a single block
+ dzMovSize = 4 // size of single MOV instruction w/ offset
+ dzLeaqSize = 4 // size of single LEAQ instruction
+ dzClearStep = 16 // number of bytes cleared by each MOV instruction
+
+ dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
+ dzSize = dzBlocks * dzBlockSize
+)
+
+// dzOff returns the offset for a jump into DUFFZERO.
+// b is the number of bytes to zero.
+func dzOff(b int64) int64 {
+ off := int64(dzSize)
+ off -= b / dzClearLen * dzBlockSize
+ tailLen := b % dzClearLen
+ if tailLen >= dzClearStep {
+ off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
+ }
+ return off
+}
+
+// dzDI returns the pre-adjustment to DI for a call to DUFFZERO.
+// b is the number of bytes to zero.
+func dzDI(b int64) int64 {
+ tailLen := b % dzClearLen
+ if tailLen < dzClearStep {
+ return 0
+ }
+ tailSteps := tailLen / dzClearStep
+ return -dzClearStep * (dzBlockLen - tailSteps)
+}
+
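To make the entry-offset arithmetic concrete, here is an editor's sketch (not part of the patch; the helper name is invented) evaluating the two functions above for a 96-byte clear, which needs one full 64-byte block plus a two-MOV tail:

// duffzeroExample is an illustrative check of dzOff/dzDI, assuming the
// constants defined above (dzSize = 304, dzBlockSize = 19, etc.).
func duffzeroExample() (off, diAdjust int64) {
	off = dzOff(96)     // 304 - 1*19 - (4 + 4*2) = 273
	diAdjust = dzDI(96) // -16 * (4 - 2) = -32
	return off, diAdjust
}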
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ const (
+ ax = 1 << iota
+ x0
+ )
+
+ if cnt == 0 {
+ return p
+ }
+
+ if cnt%int64(gc.Widthreg) != 0 {
+ // should only happen with nacl
+ if cnt%int64(gc.Widthptr) != 0 {
+ gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ }
+ if *state&ax == 0 {
+ p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *state |= ax
+ }
+ p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ off += int64(gc.Widthptr)
+ cnt -= int64(gc.Widthptr)
+ }
+
+ if cnt == 8 {
+ if *state&ax == 0 {
+ p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *state |= ax
+ }
+ p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ } else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
+ if *state&x0 == 0 {
+ p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ *state |= x0
+ }
+
+ for i := int64(0); i < cnt/16; i++ {
+ p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+ }
+
+ if cnt%16 != 0 {
+ p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+ }
+ } else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+ if *state&x0 == 0 {
+ p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+ *state |= x0
+ }
+ p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+ p.To.Sym = gc.Duffzero
+
+ if cnt%16 != 0 {
+ p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+ }
+ } else {
+ if *state&ax == 0 {
+ p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *state |= ax
+ }
+
+ p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
+
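For orientation, the size thresholds used by zerorange above can be summarized as follows. This is an editor's illustration (the function name is invented), assuming gc.Widthreg == 8 on amd64 and a non-Plan 9 target; it only names each strategy rather than emitting code:

// zeroStrategy mirrors the dispatch in zerorange for counts that are a
// multiple of the register width. Plan 9 skips the SSE-based paths
// because its note handlers cannot use floating-point registers.
func zeroStrategy(cnt int64) string {
	switch {
	case cnt == 0:
		return "nothing to do"
	case cnt == 8:
		return "single MOVQ from AX"
	case cnt <= 64: // 8 * Widthreg
		return "MOVUPS stores from X0"
	case cnt <= 1024: // 128 * Widthreg
		return "tail of DUFFZERO"
	default:
		return "REP STOSQ"
	}
}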
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ // This is a hardware nop (1-byte 0x90) instruction,
+ // even though we describe it as an explicit XCHGL here.
+ // Particularly, this does not zero the high 32 bits
+ // like typical *L opcodes.
+ // (gas assembles "xchg %eax,%eax" to 0x87 0xc0, which
+ // does zero the high 32 bits.)
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
new file mode 100644
index 0000000..5ff05a0
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -0,0 +1,1334 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "fmt"
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ }
+ }
+ panic(fmt.Sprintf("bad store type %v", t))
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ // Moving the whole sse2 register is faster
+ // than moving just the correct low portion of it.
+ // There is no xmm->xmm move with 1 byte opcode,
+ // so use movups, which has 2 byte opcode.
+ return x86.AMOVUPS
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS // int128s are in SSE registers
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// memIdx fills out a as an indexed memory reference for v.
+// It assumes that the base register and the index register
+// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
+// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary.
+func memIdx(a *obj.Addr, v *ssa.Value) {
+ r, i := v.Args[0].Reg(), v.Args[1].Reg()
+ a.Type = obj.TYPE_MEM
+ a.Scale = v.Op.Scale()
+ if a.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ a.Reg = r
+ a.Index = i
+}
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ.
+// See runtime/mkduff.go.
+func duffStart(size int64) int64 {
+ x, _ := duff(size)
+ return x
+}
+func duffAdj(size int64) int64 {
+ _, x := duff(size)
+ return x
+}
+
+// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
+// required to use the duffzero mechanism for a block of the given size.
+func duff(size int64) (int64, int64) {
+ if size < 32 || size > 1024 || size%dzClearStep != 0 {
+ panic("bad duffzero size")
+ }
+ steps := size / dzClearStep
+ blocks := steps / dzBlockLen
+ steps %= dzBlockLen
+ off := dzBlockSize * (dzBlocks - blocks)
+ var adj int64
+ if steps != 0 {
+ off -= dzLeaqSize
+ off -= dzMovSize * steps
+ adj -= dzClearStep * (dzBlockLen - steps)
+ }
+ return off, adj
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpAMD64VFMADD231SD:
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()})
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQ {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ // 2-address opcode arithmetic
+ case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
+ ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
+ ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
+ ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
+ ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
+ ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
+ ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
+ ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
+ ssa.OpAMD64ROLQ, ssa.OpAMD64ROLL, ssa.OpAMD64ROLW, ssa.OpAMD64ROLB,
+ ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
+ ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
+ ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
+ ssa.OpAMD64PXOR,
+ ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
+ ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
+ ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
+
+ case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+
+ // Zero extend dividend.
+ c := s.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+
+ // Issue divide.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+ var j1 *obj.Prog
+
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1. Handle divide by -1 as a special case.
+ if ssa.DivisionNeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ c = s.Prog(x86.ACMPQ)
+ case ssa.OpAMD64DIVL:
+ c = s.Prog(x86.ACMPL)
+ case ssa.OpAMD64DIVW:
+ c = s.Prog(x86.ACMPW)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = r
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+ j1 = s.Prog(x86.AJEQ)
+ j1.To.Type = obj.TYPE_BRANCH
+ }
+
+ // Sign extend dividend.
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ s.Prog(x86.ACQO)
+ case ssa.OpAMD64DIVL:
+ s.Prog(x86.ACDQ)
+ case ssa.OpAMD64DIVW:
+ s.Prog(x86.ACWD)
+ }
+
+ // Issue divide.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ if j1 != nil {
+ // Skip over -1 fixup code.
+ j2 := s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ // Issue -1 fixup code.
+ // n / -1 = -n
+ var n1 *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ n1 = s.Prog(x86.ANEGQ)
+ case ssa.OpAMD64DIVL:
+ n1 = s.Prog(x86.ANEGL)
+ case ssa.OpAMD64DIVW:
+ n1 = s.Prog(x86.ANEGW)
+ }
+ n1.To.Type = obj.TYPE_REG
+ n1.To.Reg = x86.REG_AX
+
+ // n % -1 == 0
+ n2 := s.Prog(x86.AXORL)
+ n2.From.Type = obj.TYPE_REG
+ n2.From.Reg = x86.REG_DX
+ n2.To.Type = obj.TYPE_REG
+ n2.To.Reg = x86.REG_DX
+
+ // TODO(khr): issue only the -1 fixup code we need.
+ // For instance, if only the quotient is used, no point in zeroing the remainder.
+
+ j1.To.SetTarget(n1)
+ j2.To.SetTarget(s.Pc())
+ }
+
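The fixup exists because Go defines the result of this overflow case instead of letting the hardware fault; a small standalone program (editor's sketch, not part of the patch) shows the behavior the generated code must reproduce:

package main

import (
	"fmt"
	"math"
)

func main() {
	x, d := int64(math.MinInt64), int64(-1)
	// The quotient wraps back to MinInt64 (what the NEGQ fixup produces)
	// and the remainder is 0 (what the XORL DX, DX produces).
	fmt.Println(x/d, x%d) // -9223372036854775808 0
}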
+ case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64MULQU2:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results hi in DX, lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64DIVQU2:
+ // Arg[0], Arg[1] are already in DX, AX, as they're the only registers we allow
+ // results q in AX, r in DX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+
+ case ssa.OpAMD64AVGQU:
+ // compute (x+y)/2 unsigned.
+ // Do a 64-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 63rd bit.
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
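In Go terms, the ADDQ/RCRQ pair computes the unsigned average without losing the carry; an equivalent pure-Go formulation (editor's sketch, invented name) is:

// avgU64 returns (x+y)/2 with the intermediate sum treated as 65 bits:
// the carry lost by the wrapping add is recovered and rotated back into
// bit 63, which is exactly what RCRQ $1 does with the carry flag.
func avgU64(x, y uint64) uint64 {
	sum := x + y
	var carry uint64
	if sum < x { // the add wrapped, so the true sum has bit 64 set
		carry = 1
	}
	return sum>>1 | carry<<63
}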
+ case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ:
+ r := v.Reg0()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ switch r {
+ case r0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ v.Fatalf("output not in same register as an input %s", v.LongString())
+ }
+
+ case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ switch v.AuxInt {
+ case 1:
+ var asm obj.As
+ // Software optimization manual recommends add $1,reg.
+ // But inc/dec is 1 byte smaller. ICC always uses inc
+ // Clang/GCC choose depending on flags, but prefer add.
+ // Experiments show that inc/dec is both a little faster
+ // and make a binary a little smaller.
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.AINCL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case -1:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ADECQ
+ } else {
+ asm = x86.ADECL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case 0x80:
+ // 'SUBQ $-0x80, r' is shorter to encode than
+ // and functionally equivalent to 'ADDQ $0x80, r'.
+ asm := x86.ASUBL
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ASUBQ
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -0x80
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ,
+ ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT,
+ ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE,
+ ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT,
+ ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE,
+ ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE,
+ ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI,
+ ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS,
+ ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC,
+ ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
+ ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
+ ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // Flag condition: ^ZERO || PARITY
+ // Generate:
+ // CMOV*NE SRC,DST
+ // CMOV*PS SRC,DST
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQNEF {
+ q = s.Prog(x86.ACMOVQPS)
+ } else if v.Op == ssa.OpAMD64CMOVLNEF {
+ q = s.Prog(x86.ACMOVLPS)
+ } else {
+ q = s.Prog(x86.ACMOVWPS)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = v.Args[1].Reg()
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+
+ case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+
+ // Flag condition: ZERO && !PARITY
+ // Generate:
+ // MOV SRC,AX
+ // CMOV*NE DST,AX
+ // CMOV*PC AX,DST
+ //
+ // TODO(rasky): we could generate:
+ // CMOV*NE DST,SRC
+ // CMOV*PC SRC,DST
+ // But this requires a way for regalloc to know that SRC might be
+ // clobbered by this instruction.
+ if v.Args[1].Reg() != x86.REG_AX {
+ opregreg(s, moveByType(v.Type), x86.REG_AX, v.Args[1].Reg())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQEQF {
+ q = s.Prog(x86.ACMOVQPC)
+ } else if v.Op == ssa.OpAMD64CMOVLEQF {
+ q = s.Prog(x86.ACMOVLPC)
+ } else {
+ q = s.Prog(x86.ACMOVWPC)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = x86.REG_AX
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+
+ case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+
+ case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
+ ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst,
+ ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
+ ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
+ ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
+ ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
+ ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
+ ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8,
+ ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8,
+ ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ o := v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ if v.AuxInt != 0 && v.Aux == nil {
+ // Emit an additional LEA to add the displacement instead of creating a slow 3 operand LEA.
+ switch v.Op {
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
+ p = s.Prog(x86.ALEAQ)
+ case ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8:
+ p = s.Prog(x86.ALEAL)
+ case ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p = s.Prog(x86.ALEAW)
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = o
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ }
+ gc.AddAux(&p.From, v)
+ case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
+ ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
+ ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
+ // Go assembler has swapped operands for UCOMISx relative to CMP,
+ // must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
+ ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
+ ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
+ ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
+ ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
+ op := v.Op
+ if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
+ // Emit 32-bit version because it's shorter
+ op = ssa.OpAMD64BTLconst
+ }
+ p := s.Prog(op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.From, v, sc.Off())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val()
+ case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[2].Reg()
+ case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ gc.AddAux2(&p.From, v, sc.Off())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val()
+ case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
+ asm := v.Op.Asm()
+ // Use MOVL to move a small constant into a register
+ // when the constant is positive and fits into 32 bits.
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
+ // The upper 32 bits are zeroed automatically when using MOVL.
+ asm = x86.AMOVL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
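Summarizing the constant-materialization choices in the case above (editor's sketch; the helper is invented and returns mnemonic strings only):

// constMov names how MOVLconst/MOVQconst are lowered: XORL when the value
// is zero and flags are dead, MOVL when the value is non-negative and fits
// in 32 bits (the upper half is zeroed implicitly), MOVQ otherwise.
func constMov(c int64, flagsLive bool) string {
	switch {
	case c == 0 && !flagsLive:
		return "XORL reg, reg"
	case 0 <= c && c <= 1<<32-1:
		return "MOVL $const, reg"
	default:
		return "MOVQ $const, reg"
	}
}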
+ case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
+ ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
+ ssa.OpAMD64BTCQmodify, ssa.OpAMD64BTCLmodify, ssa.OpAMD64BTRQmodify, ssa.OpAMD64BTRLmodify, ssa.OpAMD64BTSQmodify, ssa.OpAMD64BTSLmodify,
+ ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
+ ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
+ ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
+ ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
+ ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8,
+ ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8,
+ ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8,
+ ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ memIdx(&p.To, v)
+ gc.AddAux(&p.To, v)
+ case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconstmodify {
+ if val == 1 {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.ADECQ
+ }
+ } else {
+ if val == 1 {
+ asm = x86.AINCL
+ } else {
+ asm = x86.ADECL
+ }
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
+ ssa.OpAMD64BTCQconstmodify, ssa.OpAMD64BTCLconstmodify, ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTSLconstmodify,
+ ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTRLconstmodify, ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off()
+ val := sc.Val()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+
+ case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
+ ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
+ ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
+ ssa.OpAMD64ORLconstmodifyidx1, ssa.OpAMD64ORLconstmodifyidx4, ssa.OpAMD64ORLconstmodifyidx8, ssa.OpAMD64ORQconstmodifyidx1, ssa.OpAMD64ORQconstmodifyidx8,
+ ssa.OpAMD64XORLconstmodifyidx1, ssa.OpAMD64XORLconstmodifyidx4, ssa.OpAMD64XORLconstmodifyidx8, ssa.OpAMD64XORQconstmodifyidx1, ssa.OpAMD64XORQconstmodifyidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ switch {
+ case p.As == x86.AADDQ && p.From.Offset == 1:
+ p.As = x86.AINCQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDQ && p.From.Offset == -1:
+ p.As = x86.ADECQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == 1:
+ p.As = x86.AINCL
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == -1:
+ p.As = x86.ADECL
+ p.From.Type = obj.TYPE_NONE
+ }
+ memIdx(&p.To, v)
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
+ ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
+ ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
+ r := v.Reg()
+ // Break false dependency on destination register.
+ opregreg(s, x86.AXORPS, r, r)
+ opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ var p *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
+ p = s.Prog(x86.AMOVQ)
+ case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ p = s.Prog(x86.AMOVL)
+ }
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64ADDQload, ssa.OpAMD64ADDLload, ssa.OpAMD64SUBQload, ssa.OpAMD64SUBLload,
+ ssa.OpAMD64ANDQload, ssa.OpAMD64ANDLload, ssa.OpAMD64ORQload, ssa.OpAMD64ORLload,
+ ssa.OpAMD64XORQload, ssa.OpAMD64XORLload, ssa.OpAMD64ADDSDload, ssa.OpAMD64ADDSSload,
+ ssa.OpAMD64SUBSDload, ssa.OpAMD64SUBSSload, ssa.OpAMD64MULSDload, ssa.OpAMD64MULSSload,
+ ssa.OpAMD64DIVSDload, ssa.OpAMD64DIVSSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8,
+ ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
+ ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
+ ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8,
+ ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8,
+ ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8,
+ ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8,
+ ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8,
+ ssa.OpAMD64DIVSSloadidx1, ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8:
+ p := s.Prog(v.Op.Asm())
+
+ r, i := v.Args[1].Reg(), v.Args[2].Reg()
+ p.From.Type = obj.TYPE_MEM
+ p.From.Scale = v.Op.Scale()
+ if p.From.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Reg = r
+ p.From.Index = i
+
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ case ssa.OpAMD64DUFFZERO:
+ off := duffStart(v.AuxInt)
+ adj := duffAdj(v.AuxInt)
+ var p *obj.Prog
+ if adj != 0 {
+ p = s.Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = adj
+ p.From.Reg = x86.REG_DI
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_DI
+ }
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = off
+ case ssa.OpAMD64MOVOconst:
+ if v.AuxInt != 0 {
+ v.Fatalf("MOVOconst can only do constant=0")
+ }
+ r := v.Reg()
+ opregreg(s, x86.AXORPS, r, r)
+ case ssa.OpAMD64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Duffcopy
+ if v.AuxInt%16 != 0 {
+ v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
+ }
+ p.To.Offset = 14 * (64 - v.AuxInt/16)
+ // 14 and 64 are magic constants. 14 is the number of bytes to encode:
+ // MOVUPS (SI), X0
+ // ADDQ $16, SI
+ // MOVUPS X0, (DI)
+ // ADDQ $16, DI
+ // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
+
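The entry-offset rule for DUFFCOPY, restated as a tiny helper (editor's sketch, invented name, assuming the 14-byte/64-block layout described in the comment above):

// duffCopyOff returns the offset into runtime·duffcopy at which to enter
// in order to copy n bytes (n a positive multiple of 16); entering later
// executes fewer of the 64 identical 14-byte blocks.
func duffCopyOff(n int64) int64 {
	return 14 * (64 - n/16)
}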
+ case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpAMD64LoweredHasCPUFeature:
+ p := s.Prog(x86.AMOVBQZX)
+ p.From.Type = obj.TYPE_MEM
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpAMD64LoweredGetG:
+ r := v.Reg()
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(gc.Ctxt) {
+ // MOVQ (TLS), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVQ TLS, r
+ // MOVQ (r)(TLS*1), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVQ)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+ case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+ s.Call(v)
+
+ case ssa.OpAMD64LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -8 // PC is stored 8 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ mov := x86.AMOVQ
+ if gc.Widthptr == 4 {
+ mov = x86.AMOVL
+ }
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // arg0 is in DI. Set sym to match where regalloc put arg1.
+ p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
+
+ case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
+
+ case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
+ ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
+ ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64NEGLflags:
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ switch v.Op {
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD:
+ p.To.Reg = v.Reg()
+ }
+ case ssa.OpAMD64ROUNDSD:
+ p := s.Prog(v.Op.Asm())
+ val := v.AuxInt
+ // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
+ if val < 0 || val > 3 {
+ v.Fatalf("Invalid rounding mode")
+ }
+ p.From.Offset = val
+ p.From.Type = obj.TYPE_CONST
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL:
+ if v.Args[0].Reg() != v.Reg() {
+ // POPCNT on Intel has a false dependency on the destination register.
+ // Xor register with itself to break the dependency.
+ p := s.Prog(x86.AXORQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
+ ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
+ ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
+ ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
+ ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
+ ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
+ ssa.OpAMD64SETA, ssa.OpAMD64SETAE,
+ ssa.OpAMD64SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQstore, ssa.OpAMD64SETNEstore,
+ ssa.OpAMD64SETLstore, ssa.OpAMD64SETLEstore,
+ ssa.OpAMD64SETGstore, ssa.OpAMD64SETGEstore,
+ ssa.OpAMD64SETBstore, ssa.OpAMD64SETBEstore,
+ ssa.OpAMD64SETAstore, ssa.OpAMD64SETAEstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+
+ case ssa.OpAMD64SETNEF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ // ORL avoids a partial register write and is smaller than the ORQ used by the old compiler
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
+
+ case ssa.OpAMD64SETEQF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ // ANDL avoids a partial register write and is smaller than the ANDQ used by the old compiler
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
+
+ case ssa.OpAMD64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64REPSTOSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSQ)
+ case ssa.OpAMD64REPMOVSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSQ)
+ case ssa.OpAMD64LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
+ }
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
+ if v.Args[1].Reg() != x86.REG_AX {
+ v.Fatalf("input[1] not in AX %s", v.LongString())
+ }
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ p = s.Prog(x86.ASETEQ)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ gc.AddAux(&p.To, v)
+ p = s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ gc.AddAux(&p.To, v)
+ p.To.Offset += 4
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
+ ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
+ ssa.BlockAMD64LT: {x86.AJLT, x86.AJGE},
+ ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT},
+ ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT},
+ ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE},
+ ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC},
+ ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS},
+ ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
+ ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
+ ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
+ ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
+ ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
+ ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
+}
+
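+// Note on the float-compare jump tables below: UCOMISS/UCOMISD set ZF=1,PF=0 for
+// "equal" and PF=1 for "unordered", so EQF means ZF set with PF clear and NEF means
+// ZF clear or PF set; each pair of conditional jumps encodes that two-flag test.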
+var eqfJumps = [2][2]gc.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]gc.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in rax:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.BlockAMD64EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.BlockAMD64NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
+ ssa.BlockAMD64LT, ssa.BlockAMD64GE,
+ ssa.BlockAMD64LE, ssa.BlockAMD64GT,
+ ssa.BlockAMD64OS, ssa.BlockAMD64OC,
+ ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
+ ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
new file mode 100644
index 0000000..20e2f43
--- /dev/null
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj/arm"
+ "cmd/internal/objabi"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &arm.Linkarm
+ arch.REGSP = arm.REGSP
+ arch.MAXWIDTH = (1 << 32) - 1
+ arch.SoftFloat = objabi.GOARM == 5
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
new file mode 100644
index 0000000..bd8d7ff
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if *r0 == 0 {
+ p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ *r0 = 1
+ }
+
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+ }
+ } else if cnt <= int64(128*gc.Widthptr) {
+ p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
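+ // This assumes each duffzero block is one 4-byte MOVW.P store and that there
+ // are 128 such blocks (see runtime/duff_arm.s), so the offset skips the
+ // blocks that are not needed.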
+ } else {
+ p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+ p.Reg = arm.REG_R1
+ p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p1 := p
+ p.Scond |= arm.C_PBIT
+ p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm.REG_R2
+ p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ p := pp.Prog(arm.AAND)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0
+ p.Scond = arm.C_SCOND_EQ
+ return p
+}
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
new file mode 100644
index 0000000..765a771
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -0,0 +1,981 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/objabi"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm.AMOVB
+ } else {
+ return arm.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm.AMOVH
+ } else {
+ return arm.AMOVHU
+ }
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm.AMOVB
+ case 2:
+ return arm.AMOVH
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+ op := "<<>>->@>"[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ // register shift
+ return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ // constant shift
+ return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+}
+
+// makeshift encodes a register shifted by a constant
+func makeshift(reg int16, typ int64, s int64) shift {
+ return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
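+
+// For example (an illustrative encoding, assuming arm.SHIFT_LL selects the "<<" form):
+// makeshift(arm.REG_R1, arm.SHIFT_LL, 3) places the register number in bits 0-3 and
+// the constant 3 in bits 7-11, which shift.String renders as "R1<<3".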
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeshift(r1, typ, n))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// makeregshift encodes a register shifted by a register
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+ return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeregshift(r1, typ, r2))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// find a (lsb, width) pair for BFC
+// lsb must be in [0, 31], width must be in [1, 32 - lsb]
+ // return (0xffffffff, 0) if v does not have the binary form 0...01...10...0
+func getBFC(v uint32) (uint32, uint32) {
+ var m, l uint32
+ // BFC is not applicable with zero
+ if v == 0 {
+ return 0xffffffff, 0
+ }
+ // find the lowest set bit, for example l=2 for 0x3ffffffc
+ l = uint32(bits.TrailingZeros32(v))
+ // m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc
+ m = 32 - uint32(bits.LeadingZeros32(v))
+ // check whether v has the binary form 0...01...10...0
+ if (1<<m)-(1<<l) == v {
+ // it must be m > l for non-zero v
+ return l, m - l
+ }
+ // invalid
+ return 0xffffffff, 0
+}
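+
+// For example, getBFC(0x3ffffffc) returns (2, 28): the run of ones starts at bit 2
+// and is 28 bits wide. getBFC(0x0000ff0f) returns (0xffffffff, 0) because its set
+// bits are not one contiguous run.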
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARMMOVWreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm.AMOVW
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm.AMOVF
+ case 8:
+ as = arm.AMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARMMOVWnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpARMADD,
+ ssa.OpARMADC,
+ ssa.OpARMSUB,
+ ssa.OpARMSBC,
+ ssa.OpARMRSB,
+ ssa.OpARMAND,
+ ssa.OpARMOR,
+ ssa.OpARMXOR,
+ ssa.OpARMBIC,
+ ssa.OpARMMUL,
+ ssa.OpARMADDF,
+ ssa.OpARMADDD,
+ ssa.OpARMSUBF,
+ ssa.OpARMSUBD,
+ ssa.OpARMSLL,
+ ssa.OpARMSRL,
+ ssa.OpARMSRA,
+ ssa.OpARMMULF,
+ ssa.OpARMMULD,
+ ssa.OpARMNMULF,
+ ssa.OpARMNMULD,
+ ssa.OpARMDIVF,
+ ssa.OpARMDIVD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRR:
+ genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
+ case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ if r != r0 {
+ v.Fatalf("result and addend are not in the same register: %v", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMADDS,
+ ssa.OpARMSUBS:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRAcond:
+ // ARM shift instructions use only the low-order byte of the shift amount
+ // generate conditional instructions to deal with large shifts
+ // flag is already set
+ // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
+ // SRA.LO Rarg1, Rarg0, Rdst
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 31
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p = s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_LO
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMBFX, ssa.OpARMBFXU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMANDconst, ssa.OpARMBICconst:
+ // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
+ // BFC is only available on ARMv7, and its result and source are in the same register
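+ // For example (illustrative values): ANDconst with AuxInt 0xfff0000f clears the
+ // 16-bit field starting at bit 4, so it can be emitted as BFC with width 16 and lsb 4.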
+ if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+ var val uint32
+ if v.Op == ssa.OpARMANDconst {
+ val = ^uint32(v.AuxInt)
+ } else { // BICconst
+ val = uint32(v.AuxInt)
+ }
+ lsb, width := getBFC(val)
+ // omit BFC for ARM's imm12
+ if 8 < width && width < 24 {
+ p := s.Prog(arm.ABFC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(width)
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+ }
+ }
+ // fall back to ordinary form
+ fallthrough
+ case ssa.OpARMADDconst,
+ ssa.OpARMADCconst,
+ ssa.OpARMSUBconst,
+ ssa.OpARMSBCconst,
+ ssa.OpARMRSBconst,
+ ssa.OpARMRSCconst,
+ ssa.OpARMORconst,
+ ssa.OpARMXORconst,
+ ssa.OpARMSLLconst,
+ ssa.OpARMSRLconst,
+ ssa.OpARMSRAconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMADDSconst,
+ ssa.OpARMSUBSconst,
+ ssa.OpARMRSBSconst:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARMSRRconst:
+ genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMADDshiftLL,
+ ssa.OpARMADCshiftLL,
+ ssa.OpARMSUBshiftLL,
+ ssa.OpARMSBCshiftLL,
+ ssa.OpARMRSBshiftLL,
+ ssa.OpARMRSCshiftLL,
+ ssa.OpARMANDshiftLL,
+ ssa.OpARMORshiftLL,
+ ssa.OpARMXORshiftLL,
+ ssa.OpARMBICshiftLL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMADDSshiftLL,
+ ssa.OpARMSUBSshiftLL,
+ ssa.OpARMRSBSshiftLL:
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRL,
+ ssa.OpARMADCshiftRL,
+ ssa.OpARMSUBshiftRL,
+ ssa.OpARMSBCshiftRL,
+ ssa.OpARMRSBshiftRL,
+ ssa.OpARMRSCshiftRL,
+ ssa.OpARMANDshiftRL,
+ ssa.OpARMORshiftRL,
+ ssa.OpARMXORshiftRL,
+ ssa.OpARMBICshiftRL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMADDSshiftRL,
+ ssa.OpARMSUBSshiftRL,
+ ssa.OpARMRSBSshiftRL:
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRA,
+ ssa.OpARMADCshiftRA,
+ ssa.OpARMSUBshiftRA,
+ ssa.OpARMSBCshiftRA,
+ ssa.OpARMRSBshiftRA,
+ ssa.OpARMRSCshiftRA,
+ ssa.OpARMANDshiftRA,
+ ssa.OpARMORshiftRA,
+ ssa.OpARMXORshiftRA,
+ ssa.OpARMBICshiftRA:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMADDSshiftRA,
+ ssa.OpARMSUBSshiftRA,
+ ssa.OpARMRSBSshiftRA:
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMXORshiftRR:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMMVNshiftLL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMMVNshiftRL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMMVNshiftRA:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMMVNshiftLLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMMVNshiftRLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMMVNshiftRAreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDshiftLLreg,
+ ssa.OpARMADCshiftLLreg,
+ ssa.OpARMSUBshiftLLreg,
+ ssa.OpARMSBCshiftLLreg,
+ ssa.OpARMRSBshiftLLreg,
+ ssa.OpARMRSCshiftLLreg,
+ ssa.OpARMANDshiftLLreg,
+ ssa.OpARMORshiftLLreg,
+ ssa.OpARMXORshiftLLreg,
+ ssa.OpARMBICshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMADDSshiftLLreg,
+ ssa.OpARMSUBSshiftLLreg,
+ ssa.OpARMRSBSshiftLLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRLreg,
+ ssa.OpARMADCshiftRLreg,
+ ssa.OpARMSUBshiftRLreg,
+ ssa.OpARMSBCshiftRLreg,
+ ssa.OpARMRSBshiftRLreg,
+ ssa.OpARMRSCshiftRLreg,
+ ssa.OpARMANDshiftRLreg,
+ ssa.OpARMORshiftRLreg,
+ ssa.OpARMXORshiftRLreg,
+ ssa.OpARMBICshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMADDSshiftRLreg,
+ ssa.OpARMSUBSshiftRLreg,
+ ssa.OpARMRSBSshiftRLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRAreg,
+ ssa.OpARMADCshiftRAreg,
+ ssa.OpARMSUBshiftRAreg,
+ ssa.OpARMSBCshiftRAreg,
+ ssa.OpARMRSBshiftRAreg,
+ ssa.OpARMRSCshiftRAreg,
+ ssa.OpARMANDshiftRAreg,
+ ssa.OpARMORshiftRAreg,
+ ssa.OpARMXORshiftRAreg,
+ ssa.OpARMBICshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDSshiftRAreg,
+ ssa.OpARMSUBSshiftRAreg,
+ ssa.OpARMRSBSshiftRAreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMHMUL,
+ ssa.OpARMHMULU:
+ // 32-bit high multiplication
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg()
+ p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
+ case ssa.OpARMMULLU:
+ // 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg0() // high 32-bit
+ p.To.Offset = int64(v.Reg1()) // low 32-bit
+ case ssa.OpARMMULA, ssa.OpARMMULS:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG2
+ p.To.Reg = v.Reg() // result
+ p.To.Offset = int64(v.Args[2].Reg()) // addend
+ case ssa.OpARMMOVWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVFconst,
+ ssa.OpARMMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMP,
+ ssa.OpARMCMN,
+ ssa.OpARMTST,
+ ssa.OpARMTEQ,
+ ssa.OpARMCMPF,
+ ssa.OpARMCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ // Special layout in ARM assembly
+ // Compared to x86, the operands of ARM's CMP are reversed.
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPconst,
+ ssa.OpARMCMNconst,
+ ssa.OpARMTSTconst,
+ ssa.OpARMTEQconst:
+ // Special layout in ARM assembly
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPF0,
+ ssa.OpARMCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
+ case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
+ case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
+ case ssa.OpARMMOVWaddr:
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R13)
+ // when constant is large, tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+
+ case ssa.OpARMMOVBload,
+ ssa.OpARMMOVBUload,
+ ssa.OpARMMOVHload,
+ ssa.OpARMMOVHUload,
+ ssa.OpARMMOVWload,
+ ssa.OpARMMOVFload,
+ ssa.OpARMMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVBstore,
+ ssa.OpARMMOVHstore,
+ ssa.OpARMMOVWstore,
+ ssa.OpARMMOVFstore,
+ ssa.OpARMMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
+ // this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWloadshiftLL:
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRL:
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRA:
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
+ // this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWstoreshiftLL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRA:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
+ case ssa.OpARMMOVBreg,
+ ssa.OpARMMOVBUreg,
+ ssa.OpARMMOVHreg,
+ ssa.OpARMMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ if objabi.GOARM >= 6 {
+ // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
+ return
+ }
+ fallthrough
+ case ssa.OpARMMVN,
+ ssa.OpARMCLZ,
+ ssa.OpARMREV,
+ ssa.OpARMREV16,
+ ssa.OpARMRBIT,
+ ssa.OpARMSQRTD,
+ ssa.OpARMNEGF,
+ ssa.OpARMNEGD,
+ ssa.OpARMABSD,
+ ssa.OpARMMOVWF,
+ ssa.OpARMMOVWD,
+ ssa.OpARMMOVFW,
+ ssa.OpARMMOVDW,
+ ssa.OpARMMOVFD,
+ ssa.OpARMMOVDF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVWUF,
+ ssa.OpARMMOVWUD,
+ ssa.OpARMMOVFWU,
+ ssa.OpARMMOVDWU:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_UBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWHSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWLSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_LS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
+ s.Call(v)
+ case ssa.OpARMCALLudiv:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Udiv
+ case ssa.OpARMLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpARMDUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMDUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpARMLoweredZero:
+ // MOVW.P Rarg2, 4(R1)
+ // CMP Rarg1, R1
+ // BLE -2(PC)
+ // arg1 is the address of the last element to zero
+ // arg2 is known to be zero
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REG_R1
+ p.To.Offset = sz
+ p2 := s.Prog(arm.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm.REG_R1
+ p3 := s.Prog(arm.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ case ssa.OpARMLoweredMove:
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP Rarg2, R1
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm.REG_R1
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ p2 := s.Prog(mov)
+ p2.Scond = arm.C_PBIT
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm.REG_R2
+ p2.To.Offset = sz
+ p3 := s.Prog(arm.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm.REG_R1
+ p4 := s.Prog(arm.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+ case ssa.OpARMEqual,
+ ssa.OpARMNotEqual,
+ ssa.OpARMLessThan,
+ ssa.OpARMLessEqual,
+ ssa.OpARMGreaterThan,
+ ssa.OpARMGreaterEqual,
+ ssa.OpARMLessThanU,
+ ssa.OpARMLessEqualU,
+ ssa.OpARMGreaterThanU,
+ ssa.OpARMGreaterEqualU:
+ // generate boolean values
+ // use conditional move
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p = s.Prog(arm.AMOVW)
+ p.Scond = condBits[v.Op]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetClosurePtr:
+ // Closure pointer is R7 (arm.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMFlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARMInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]uint8{
+ ssa.OpARMEqual: arm.C_SCOND_EQ,
+ ssa.OpARMNotEqual: arm.C_SCOND_NE,
+ ssa.OpARMLessThan: arm.C_SCOND_LT,
+ ssa.OpARMLessThanU: arm.C_SCOND_LO,
+ ssa.OpARMLessEqual: arm.C_SCOND_LE,
+ ssa.OpARMLessEqualU: arm.C_SCOND_LS,
+ ssa.OpARMGreaterThan: arm.C_SCOND_GT,
+ ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
+ ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
+ ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
+ ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
+ ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
+ ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
+ ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
+ ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
+ ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
+ ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
+ ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
+ ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
+ ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
+ ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
+}
+
+// To model an 'LEnoov' ('<=' without overflow checking) branch
+var leJumps = [2][2]gc.IndexJump{
+ {{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// To model a 'GTnoov' ('>' without overflow checking) branch
+var gtJumps = [2][2]gc.IndexJump{
+ {{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
+}
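+
+// In both tables (a reading of the flag logic, not from the original source):
+// LEnoov is taken when Z or N is set (BEQ or BMI), and GTnoov when both are clear,
+// which is what the paired conditional jumps implement.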
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm.REG_R0
+ p = s.Prog(arm.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.BlockARMEQ, ssa.BlockARMNE,
+ ssa.BlockARMLT, ssa.BlockARMGE,
+ ssa.BlockARMLE, ssa.BlockARMGT,
+ ssa.BlockARMULT, ssa.BlockARMUGT,
+ ssa.BlockARMULE, ssa.BlockARMUGE,
+ ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ case ssa.BlockARMLEnoov:
+ s.CombJump(b, next, &leJumps)
+
+ case ssa.BlockARMGTnoov:
+ s.CombJump(b, next, &gtJumps)
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
new file mode 100644
index 0000000..40d6e17
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj/arm64"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &arm64.Linkarm64
+ arch.REGSP = arm64.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.PadFrame = padframe
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
new file mode 100644
index 0000000..f3fec03
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/objabi"
+)
+
+var darwin = objabi.GOOS == "darwin" || objabi.GOOS == "ios"
+
+func padframe(frame int64) int64 {
+ // arm64 requires that the frame size (not counting saved FP&LR)
+ // be 16-byte aligned. If not, pad it.
+ if frame%16 != 0 {
+ frame += 16 - (frame % 16)
+ }
+ return frame
+}
+
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+ if cnt%(2*int64(gc.Widthptr)) != 0 {
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ off += int64(gc.Widthptr)
+ cnt -= int64(gc.Widthptr)
+ }
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+ p.Reg = arm64.REG_R20
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
+ } else {
+ // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
+ // We are at the function entry, where no register is live, so it is okay to clobber
+ // other registers.
+ const rtmp = arm64.REG_R20
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+ p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
+ p.Scond = arm64.C_XPRE
+ p1 := p
+ p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm64.REGRT2
+ p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ p := pp.Prog(arm64.AHINT)
+ p.From.Type = obj.TYPE_CONST
+ return p
+}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000..4358851
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,1251 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// makeshift encodes a register shifted by a constant, used as an Offset in Prog
+func makeshift(reg int16, typ int64, s int64) int64 {
+ return int64(reg&31)<<16 | typ | (s&63)<<10
+}
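+
+// For example (an illustrative encoding, assuming arm64.SHIFT_LL selects the "<<" form):
+// makeshift(arm64.REG_R1, arm64.SHIFT_LL, 3) places the register number in bits 16-20
+// and the shift amount 3 in bits 10-15, encoding "R1<<3".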
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = makeshift(r1, typ, n)
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// generate the memory operand for the indexed load/store instructions
+func genIndexedOperand(v *ssa.Value) obj.Addr {
+ // Reg: base register, Index: (shifted) index register
+ mop := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ switch v.Op {
+ case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8:
+ mop.Index = arm64.REG_LSL | 3<<5 | v.Args[1].Reg()&31
+ case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4:
+ mop.Index = arm64.REG_LSL | 2<<5 | v.Args[1].Reg()&31
+ case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2:
+ mop.Index = arm64.REG_LSL | 1<<5 | v.Args[1].Reg()&31
+ default: // not shifted
+ mop.Index = v.Args[1].Reg()
+ }
+ return mop
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARM64MOVDnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64EON,
+ ssa.OpARM64ORN,
+ ssa.OpARM64MUL,
+ ssa.OpARM64MULW,
+ ssa.OpARM64MNEG,
+ ssa.OpARM64MNEGW,
+ ssa.OpARM64MULH,
+ ssa.OpARM64UMULH,
+ ssa.OpARM64MULL,
+ ssa.OpARM64UMULL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64SLL,
+ ssa.OpARM64SRL,
+ ssa.OpARM64SRA,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FNMULS,
+ ssa.OpARM64FNMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD,
+ ssa.OpARM64ROR,
+ ssa.OpARM64RORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64FMADDS,
+ ssa.OpARM64FMADDD,
+ ssa.OpARM64FNMADDS,
+ ssa.OpARM64FNMADDD,
+ ssa.OpARM64FMSUBS,
+ ssa.OpARM64FMSUBD,
+ ssa.OpARM64FNMSUBS,
+ ssa.OpARM64FNMSUBD,
+ ssa.OpARM64MADD,
+ ssa.OpARM64MADDW,
+ ssa.OpARM64MSUB,
+ ssa.OpARM64MSUBW:
+ rt := v.Reg()
+ ra := v.Args[0].Reg()
+ rm := v.Args[1].Reg()
+ rn := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Reg = ra
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rm
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: rn})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rt
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64SLLconst,
+ ssa.OpARM64SRLconst,
+ ssa.OpARM64SRAconst,
+ ssa.OpARM64RORconst,
+ ssa.OpARM64RORWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADDSconstflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64ADCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADCSflags,
+ ssa.OpARM64ADDSflags,
+ ssa.OpARM64SBCSflags,
+ ssa.OpARM64SUBSflags:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64NEGSflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64NGCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64EXTRconst,
+ ssa.OpARM64EXTRWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64ADDshiftLL,
+ ssa.OpARM64SUBshiftLL,
+ ssa.OpARM64ANDshiftLL,
+ ssa.OpARM64ORshiftLL,
+ ssa.OpARM64XORshiftLL,
+ ssa.OpARM64EONshiftLL,
+ ssa.OpARM64ORNshiftLL,
+ ssa.OpARM64BICshiftLL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64ADDshiftRL,
+ ssa.OpARM64SUBshiftRL,
+ ssa.OpARM64ANDshiftRL,
+ ssa.OpARM64ORshiftRL,
+ ssa.OpARM64XORshiftRL,
+ ssa.OpARM64EONshiftRL,
+ ssa.OpARM64ORNshiftRL,
+ ssa.OpARM64BICshiftRL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64ADDshiftRA,
+ ssa.OpARM64SUBshiftRA,
+ ssa.OpARM64ANDshiftRA,
+ ssa.OpARM64ORshiftRA,
+ ssa.OpARM64XORshiftRA,
+ ssa.OpARM64EONshiftRA,
+ ssa.OpARM64ORNshiftRA,
+ ssa.OpARM64BICshiftRA:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FCMPS0,
+ ssa.OpARM64FCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(0)
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64TST,
+ ssa.OpARM64TSTW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst,
+ ssa.OpARM64TSTconst,
+ ssa.OpARM64TSTWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MOVDaddr:
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP
+ //   when the constant is large, the tmp register (REGTMP) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64MOVBloadidx,
+ ssa.OpARM64MOVBUloadidx,
+ ssa.OpARM64MOVHloadidx,
+ ssa.OpARM64MOVHUloadidx,
+ ssa.OpARM64MOVWloadidx,
+ ssa.OpARM64MOVWUloadidx,
+ ssa.OpARM64MOVDloadidx,
+ ssa.OpARM64FMOVSloadidx,
+ ssa.OpARM64FMOVDloadidx,
+ ssa.OpARM64MOVHloadidx2,
+ ssa.OpARM64MOVHUloadidx2,
+ ssa.OpARM64MOVWloadidx4,
+ ssa.OpARM64MOVWUloadidx4,
+ ssa.OpARM64MOVDloadidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From = genIndexedOperand(v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LDAR,
+ ssa.OpARM64LDARB,
+ ssa.OpARM64LDARW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore,
+ ssa.OpARM64STLRB,
+ ssa.OpARM64STLR,
+ ssa.OpARM64STLRW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstoreidx,
+ ssa.OpARM64MOVHstoreidx,
+ ssa.OpARM64MOVWstoreidx,
+ ssa.OpARM64MOVDstoreidx,
+ ssa.OpARM64FMOVSstoreidx,
+ ssa.OpARM64FMOVDstoreidx,
+ ssa.OpARM64MOVHstoreidx2,
+ ssa.OpARM64MOVWstoreidx4,
+ ssa.OpARM64MOVDstoreidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ case ssa.OpARM64STP:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Offset = int64(v.Args[2].Reg())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezero,
+ ssa.OpARM64MOVHstorezero,
+ ssa.OpARM64MOVWstorezero,
+ ssa.OpARM64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx,
+ ssa.OpARM64MOVWstorezeroidx,
+ ssa.OpARM64MOVDstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx2,
+ ssa.OpARM64MOVWstorezeroidx4,
+ ssa.OpARM64MOVDstorezeroidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ case ssa.OpARM64MOVQstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpARM64BFI,
+ ssa.OpARM64BFXIL:
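+ // AuxInt encodes the bitfield spec: lsb in the high bits, width in the low 8 bits.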
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64SBFIZ,
+ ssa.OpARM64SBFX,
+ ssa.OpARM64UBFIZ,
+ ssa.OpARM64UBFX:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff})
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredMuluhilo:
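+ // 64x64-bit multiply producing a 128-bit result:
+ // UMULH Rarg1, Rarg0, Rout0 (high 64 bits of the product)
+ // MUL Rarg1, Rarg0, Rout1 (low 64 bits of the product)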
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(arm64.AUMULH)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(arm64.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+ case ssa.OpARM64LoweredAtomicExchange64,
+ ssa.OpARM64LoweredAtomicExchange32:
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = arm64.REGTMP
+ p2 := s.Prog(arm64.ACBNZ)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p2, p)
+ case ssa.OpARM64LoweredAtomicExchange64Variant,
+ ssa.OpARM64LoweredAtomicExchange32Variant:
+ swap := arm64.ASWPALD
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32Variant {
+ swap = arm64.ASWPALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // SWPALD Rarg1, (Rarg0), Rout
+ p := s.Prog(swap)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ case ssa.OpARM64LoweredAtomicAdd64,
+ ssa.OpARM64LoweredAtomicAdd32:
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ case ssa.OpARM64LoweredAtomicAdd64Variant,
+ ssa.OpARM64LoweredAtomicAdd32Variant:
+ // LDADDAL Rarg1, (Rarg0), Rout
+ // ADD Rarg1, Rout
+ op := arm64.ALDADDALD
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32Variant {
+ op = arm64.ALDADDALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ case ssa.OpARM64LoweredAtomicCas64,
+ ssa.OpARM64LoweredAtomicCas32:
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ cmp := arm64.ACMP
+ if v.Op == ssa.OpARM64LoweredAtomicCas32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ cmp = arm64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = arm64.REGTMP
+ p2 := s.Prog(arm64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = arm64.REGTMP
+ p4 := s.Prog(arm64.ACBNZ)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = arm64.REGTMP
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+ p5 := s.Prog(arm64.ACSET)
+ p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p5.From.Reg = arm64.COND_EQ
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ gc.Patch(p2, p5)
+ case ssa.OpARM64LoweredAtomicCas64Variant,
+ ssa.OpARM64LoweredAtomicCas32Variant:
+ // Rarg0: ptr
+ // Rarg1: old
+ // Rarg2: new
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ cas := arm64.ACASALD
+ cmp := arm64.ACMP
+ mov := arm64.AMOVD
+ if v.Op == ssa.OpARM64LoweredAtomicCas32Variant {
+ cas = arm64.ACASALW
+ cmp = arm64.ACMPW
+ mov = arm64.AMOVW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ // MOV Rarg1, Rtmp
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+ // CASAL Rtmp, (Rarg0), Rarg2
+ p1 := s.Prog(cas)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = r2
+
+ // CMP Rarg1, Rtmp
+ p2 := s.Prog(cmp)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = arm64.REGTMP
+
+ // CSET EQ, Rout
+ p3 := s.Prog(arm64.ACSET)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.COND_EQ
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicAnd8,
+ ssa.OpARM64LoweredAtomicAnd32,
+ ssa.OpARM64LoweredAtomicOr8,
+ ssa.OpARM64LoweredAtomicOr32:
+ // LDAXRB/LDAXRW (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+ // STLXRB/STLXRW Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXRB
+ st := arm64.ASTLXRB
+ if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ case ssa.OpARM64LoweredAtomicAnd8Variant,
+ ssa.OpARM64LoweredAtomicAnd32Variant:
+ atomic_clear := arm64.ALDCLRALW
+ if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant {
+ atomic_clear = arm64.ALDCLRALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // MVN Rarg1, Rtmp
+ p := s.Prog(arm64.AMVN)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+ // LDCLRALW Rtmp, (Rarg0), Rout
+ p1 := s.Prog(atomic_clear)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = out
+
+ // AND Rarg1, Rout
+ p2 := s.Prog(arm64.AAND)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicOr8Variant,
+ ssa.OpARM64LoweredAtomicOr32Variant:
+ atomic_or := arm64.ALDORALW
+ if v.Op == ssa.OpARM64LoweredAtomicOr8Variant {
+ atomic_or = arm64.ALDORALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // LDORALW Rarg1, (Rarg0), Rout
+ p := s.Prog(atomic_or)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ // ORR Rarg1, Rout
+ p2 := s.Prog(arm64.AORR)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FABSD,
+ ssa.OpARM64FMOVDfpgp,
+ ssa.OpARM64FMOVDgpfp,
+ ssa.OpARM64FMOVSfpgp,
+ ssa.OpARM64FMOVSgpfp,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS,
+ ssa.OpARM64REV,
+ ssa.OpARM64REVW,
+ ssa.OpARM64REV16W,
+ ssa.OpARM64RBIT,
+ ssa.OpARM64RBITW,
+ ssa.OpARM64CLZ,
+ ssa.OpARM64CLZW,
+ ssa.OpARM64FRINTAD,
+ ssa.OpARM64FRINTMD,
+ ssa.OpARM64FRINTND,
+ ssa.OpARM64FRINTPD,
+ ssa.OpARM64FRINTZD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredRound32F, ssa.OpARM64LoweredRound64F:
+ // input is already rounded
+ case ssa.OpARM64VCNT:
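+ // Operate on the 8B arrangement of the V register corresponding to each F register operand.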
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = (v.Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ case ssa.OpARM64VUADDLV:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg() - arm64.REG_F0 + arm64.REG_V0
+ case ssa.OpARM64CSEL, ssa.OpARM64CSEL0:
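+ // AuxInt holds the comparison op whose condition selects the result; CSEL0 uses ZR in place of a second register operand.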
+ r1 := int16(arm64.REGZERO)
+ if v.Op != ssa.OpARM64CSEL0 {
+ r1 = v.Args[1].Reg()
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[ssa.Op(v.AuxInt)]
+ p.Reg = v.Args[0].Reg()
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64DUFFZERO:
+ // runtime.duffzero expects start address in R20
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredZero:
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // arg1 is the address of the last 16-byte unit to zero
+ p := s.Prog(arm64.ASTP)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REG_R16
+ p.To.Offset = 16
+ p2 := s.Prog(arm64.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm64.REG_R16
+ p3 := s.Prog(arm64.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ case ssa.OpARM64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredMove:
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ p := s.Prog(arm64.AMOVD)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm64.REG_R16
+ p.From.Offset = 8
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p2 := s.Prog(arm64.AMOVD)
+ p2.Scond = arm64.C_XPOST
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm64.REG_R17
+ p2.To.Offset = 8
+ p3 := s.Prog(arm64.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm64.REG_R16
+ p4 := s.Prog(arm64.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+ case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
+ s.Call(v)
+ case ssa.OpARM64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpARM64LoweredPanicBoundsA, ssa.OpARM64LoweredPanicBoundsB, ssa.OpARM64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpARM64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU,
+ ssa.OpARM64LessThanF,
+ ssa.OpARM64LessEqualF,
+ ssa.OpARM64GreaterThanF,
+ ssa.OpARM64GreaterEqualF,
+ ssa.OpARM64NotLessThanF,
+ ssa.OpARM64NotLessEqualF,
+ ssa.OpARM64NotGreaterThanF,
+ ssa.OpARM64NotGreaterEqualF:
+ // generate boolean values using CSET
+ p := s.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[v.Op]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
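+// condBits maps an SSA comparison op to the arm64 condition code used by CSET and CSEL.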
+var condBits = map[ssa.Op]int16{
+ ssa.OpARM64Equal: arm64.COND_EQ,
+ ssa.OpARM64NotEqual: arm64.COND_NE,
+ ssa.OpARM64LessThan: arm64.COND_LT,
+ ssa.OpARM64LessThanU: arm64.COND_LO,
+ ssa.OpARM64LessEqual: arm64.COND_LE,
+ ssa.OpARM64LessEqualU: arm64.COND_LS,
+ ssa.OpARM64GreaterThan: arm64.COND_GT,
+ ssa.OpARM64GreaterThanU: arm64.COND_HI,
+ ssa.OpARM64GreaterEqual: arm64.COND_GE,
+ ssa.OpARM64GreaterEqualU: arm64.COND_HS,
+ ssa.OpARM64LessThanF: arm64.COND_MI, // Less than
+ ssa.OpARM64LessEqualF: arm64.COND_LS, // Less than or equal to
+ ssa.OpARM64GreaterThanF: arm64.COND_GT, // Greater than
+ ssa.OpARM64GreaterEqualF: arm64.COND_GE, // Greater than or equal to
+
+ // The following condition codes include the unordered case, to handle comparisons involving NaN.
+ ssa.OpARM64NotLessThanF: arm64.COND_PL, // Greater than, equal to, or unordered
+ ssa.OpARM64NotLessEqualF: arm64.COND_HI, // Greater than or unordered
+ ssa.OpARM64NotGreaterThanF: arm64.COND_LE, // Less than, equal to or unordered
+ ssa.OpARM64NotGreaterEqualF: arm64.COND_LT, // Less than or unordered
+}
+
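+// blockJump maps a conditional block kind to the branch taken to Succs[0] (asm) and its inverse (invasm).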
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ},
+ ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ},
+ ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW},
+ ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
+ ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ},
+ ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ},
+ ssa.BlockARM64FLT: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64FGE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64FLE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64FGT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64LTnoov: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64GEnoov: {arm64.ABPL, arm64.ABMI},
+}
+
+// To model a 'LEnoov' ('<=' without overflow checking) branch
+var leJumps = [2][2]gc.IndexJump{
+ {{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// To model a 'GTnoov' ('>' without overflow checking) branch
+var gtJumps = [2][2]gc.IndexJump{
+ {{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
+}
+
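+// ssaGenBlock generates the branch or return instructions that end block b.
+// next is the block that will be laid out immediately after b, so a jump to it can be omitted.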
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = s.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE,
+ ssa.BlockARM64Z, ssa.BlockARM64NZ,
+ ssa.BlockARM64ZW, ssa.BlockARM64NZW,
+ ssa.BlockARM64FLT, ssa.BlockARM64FGE,
+ ssa.BlockARM64FLE, ssa.BlockARM64FGT,
+ ssa.BlockARM64LTnoov, ssa.BlockARM64GEnoov:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ p.From.Offset = b.AuxInt
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Controls[0].Reg()
+
+ case ssa.BlockARM64LEnoov:
+ s.CombJump(b, next, &leJumps)
+ case ssa.BlockARM64GTnoov:
+ s.CombJump(b, next, &gtJumps)
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
new file mode 100644
index 0000000..2f7fa27
--- /dev/null
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -0,0 +1,959 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+)
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int
+
+//go:generate stringer -type AlgKind -trimprefix A
+
+const (
+ // These values are known by runtime.
+ ANOEQ AlgKind = iota
+ AMEM0
+ AMEM8
+ AMEM16
+ AMEM32
+ AMEM64
+ AMEM128
+ ASTRING
+ AINTER
+ ANILINTER
+ AFLOAT32
+ AFLOAT64
+ ACPLX64
+ ACPLX128
+
+ // Type can be compared/hashed as regular memory.
+ AMEM AlgKind = 100
+
+ // Type needs special comparison/hashing functions.
+ ASPECIAL AlgKind = -1
+)
+
+// IsComparable reports whether t is a comparable type.
+func IsComparable(t *types.Type) bool {
+ a, _ := algtype1(t)
+ return a != ANOEQ
+}
+
+// IsRegularMemory reports whether t can be compared/hashed as regular memory.
+func IsRegularMemory(t *types.Type) bool {
+ a, _ := algtype1(t)
+ return a == AMEM
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func IncomparableField(t *types.Type) *types.Field {
+ for _, f := range t.FieldSlice() {
+ if !IsComparable(f.Type) {
+ return f
+ }
+ }
+ return nil
+}
+
+// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// t must be comparable.
+func EqCanPanic(t *types.Type) bool {
+ switch t.Etype {
+ default:
+ return false
+ case TINTER:
+ return true
+ case TARRAY:
+ return EqCanPanic(t.Elem())
+ case TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
+// instead of the general AMEM kind when possible.
+func algtype(t *types.Type) AlgKind {
+ a, _ := algtype1(t)
+ if a == AMEM {
+ switch t.Width {
+ case 0:
+ return AMEM0
+ case 1:
+ return AMEM8
+ case 2:
+ return AMEM16
+ case 4:
+ return AMEM32
+ case 8:
+ return AMEM64
+ case 16:
+ return AMEM128
+ }
+ }
+
+ return a
+}
+
+// algtype1 returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
+func algtype1(t *types.Type) (AlgKind, *types.Type) {
+ if t.Broke() {
+ return AMEM, nil
+ }
+ if t.Noalg() {
+ return ANOEQ, t
+ }
+
+ switch t.Etype {
+ case TANY, TFORW:
+ // will be defined later.
+ return ANOEQ, t
+
+ case TINT8, TUINT8, TINT16, TUINT16,
+ TINT32, TUINT32, TINT64, TUINT64,
+ TINT, TUINT, TUINTPTR,
+ TBOOL, TPTR,
+ TCHAN, TUNSAFEPTR:
+ return AMEM, nil
+
+ case TFUNC, TMAP:
+ return ANOEQ, t
+
+ case TFLOAT32:
+ return AFLOAT32, nil
+
+ case TFLOAT64:
+ return AFLOAT64, nil
+
+ case TCOMPLEX64:
+ return ACPLX64, nil
+
+ case TCOMPLEX128:
+ return ACPLX128, nil
+
+ case TSTRING:
+ return ASTRING, nil
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ return ANILINTER, nil
+ }
+ return AINTER, nil
+
+ case TSLICE:
+ return ANOEQ, t
+
+ case TARRAY:
+ a, bad := algtype1(t.Elem())
+ switch a {
+ case AMEM:
+ return AMEM, nil
+ case ANOEQ:
+ return ANOEQ, bad
+ }
+
+ switch t.NumElem() {
+ case 0:
+ // We checked above that the element type is comparable.
+ return AMEM, nil
+ case 1:
+ // Single-element array is same as its lone element.
+ return a, nil
+ }
+
+ return ASPECIAL, nil
+
+ case TSTRUCT:
+ fields := t.FieldSlice()
+
+ // One-field struct is same as that one field alone.
+ if len(fields) == 1 && !fields[0].Sym.IsBlank() {
+ return algtype1(fields[0].Type)
+ }
+
+ ret := AMEM
+ for i, f := range fields {
+ // All fields must be comparable.
+ a, bad := algtype1(f.Type)
+ if a == ANOEQ {
+ return ANOEQ, bad
+ }
+
+ // Blank fields, padded fields, fields with non-memory
+ // equality need special compare.
+ if a != AMEM || f.Sym.IsBlank() || ispaddedfield(t, i) {
+ ret = ASPECIAL
+ }
+ }
+
+ return ret, nil
+ }
+
+ Fatalf("algtype1: unexpected type %v", t)
+ return 0, nil
+}
+
+// genhash returns a symbol which is the closure used to compute
+// the hash of a value of type t.
+// Note: the generated function must match runtime.typehash exactly.
+func genhash(t *types.Type) *obj.LSym {
+ switch algtype(t) {
+ default:
+ // genhash is only called for types that have equality
+ Fatalf("genhash %v", t)
+ case AMEM0:
+ return sysClosure("memhash0")
+ case AMEM8:
+ return sysClosure("memhash8")
+ case AMEM16:
+ return sysClosure("memhash16")
+ case AMEM32:
+ return sysClosure("memhash32")
+ case AMEM64:
+ return sysClosure("memhash64")
+ case AMEM128:
+ return sysClosure("memhash128")
+ case ASTRING:
+ return sysClosure("strhash")
+ case AINTER:
+ return sysClosure("interhash")
+ case ANILINTER:
+ return sysClosure("nilinterhash")
+ case AFLOAT32:
+ return sysClosure("f32hash")
+ case AFLOAT64:
+ return sysClosure("f64hash")
+ case ACPLX64:
+ return sysClosure("c64hash")
+ case ACPLX128:
+ return sysClosure("c128hash")
+ case AMEM:
+ // For other sizes of plain memory, we build a closure
+ // that calls memhash_varlen. The size of the memory is
+ // encoded in the first slot of the closure.
+ closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ if memhashvarlen == nil {
+ memhashvarlen = sysfunc("memhash_varlen")
+ }
+ ot := 0
+ ot = dsymptr(closure, ot, memhashvarlen, 0)
+ ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+ ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case ASPECIAL:
+ break
+ }
+
+ closure := typesymprefix(".hashfunc", t).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+
+ // Generate hash functions for subtypes.
+ // There are cases where we might not use these hashes,
+ // but in that case they will get dead-code eliminated.
+ // (And the closure generated by genhash will also get
+ // dead-code eliminated, as we call the subtype hashers
+ // directly.)
+ switch t.Etype {
+ case types.TARRAY:
+ genhash(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ genhash(f.Type)
+ }
+ }
+
+ sym := typesymprefix(".hash", t)
+ if Debug.r != 0 {
+ fmt.Printf("genhash %v %v %v\n", closure, sym, t)
+ }
+
+ lineno = autogeneratedPos // less confusing than end of input
+ dclcontext = PEXTERN
+
+ // func sym(p *T, h uintptr) uintptr
+ tfn := nod(OTFUNC, nil, nil)
+ tfn.List.Set2(
+ namedfield("p", types.NewPtr(t)),
+ namedfield("h", types.Types[TUINTPTR]),
+ )
+ tfn.Rlist.Set1(anonfield(types.Types[TUINTPTR]))
+
+ fn := dclfunc(sym, tfn)
+ np := asNode(tfn.Type.Params().Field(0).Nname)
+ nh := asNode(tfn.Type.Params().Field(1).Nname)
+
+ switch t.Etype {
+ case types.TARRAY:
+ // An array of pure memory would be handled by the
+ // standard algorithm, so the element type must not be
+ // pure memory.
+ hashel := hashfor(t.Elem())
+
+ n := nod(ORANGE, nil, nod(ODEREF, np, nil))
+ ni := newname(lookup("i"))
+ ni.Type = types.Types[TINT]
+ n.List.Set1(ni)
+ n.SetColas(true)
+ colasdefn(n.List.Slice(), n)
+ ni = n.List.First()
+
+ // h = hashel(&p[i], h)
+ call := nod(OCALL, hashel, nil)
+
+ nx := nod(OINDEX, np, ni)
+ nx.SetBounded(true)
+ na := nod(OADDR, nx, nil)
+ call.List.Append(na)
+ call.List.Append(nh)
+ n.Nbody.Append(nod(OAS, nh, call))
+
+ fn.Nbody.Append(n)
+
+ case types.TSTRUCT:
+ // Walk the struct using memhash for runs of AMEM
+ // and calling specific hash functions for the others.
+ for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ // Hash non-memory fields with appropriate hash function.
+ if !IsRegularMemory(f.Type) {
+ hashel := hashfor(f.Type)
+ call := nod(OCALL, hashel, nil)
+ nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := nod(OADDR, nx, nil)
+ call.List.Append(na)
+ call.List.Append(nh)
+ fn.Nbody.Append(nod(OAS, nh, call))
+ i++
+ continue
+ }
+
+ // Otherwise, hash a maximal length run of raw memory.
+ size, next := memrun(t, i)
+
+ // h = hashel(&p.first, size, h)
+ hashel := hashmem(f.Type)
+ call := nod(OCALL, hashel, nil)
+ nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := nod(OADDR, nx, nil)
+ call.List.Append(na)
+ call.List.Append(nh)
+ call.List.Append(nodintconst(size))
+ fn.Nbody.Append(nod(OAS, nh, call))
+
+ i = next
+ }
+ }
+
+ r := nod(ORETURN, nil, nil)
+ r.List.Append(nh)
+ fn.Nbody.Append(r)
+
+ if Debug.r != 0 {
+ dumplist("genhash body", fn.Nbody)
+ }
+
+ funcbody()
+
+ fn.Func.SetDupok(true)
+ fn = typecheck(fn, ctxStmt)
+
+ Curfn = fn
+ typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ Curfn = nil
+
+ if debug_dclstack != 0 {
+ testdclstack()
+ }
+
+ fn.Func.SetNilCheckDisabled(true)
+ xtop = append(xtop, fn)
+
+ // Build closure. It doesn't close over any variables, so
+ // it contains just the function pointer.
+ dsymptr(closure, 0, sym.Linksym(), 0)
+ ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+
+ return closure
+}
+
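+// hashfor returns the node representing the hash function to call for values of type t.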
+func hashfor(t *types.Type) *Node {
+ var sym *types.Sym
+
+ switch a, _ := algtype1(t); a {
+ case AMEM:
+ Fatalf("hashfor with AMEM type")
+ case AINTER:
+ sym = Runtimepkg.Lookup("interhash")
+ case ANILINTER:
+ sym = Runtimepkg.Lookup("nilinterhash")
+ case ASTRING:
+ sym = Runtimepkg.Lookup("strhash")
+ case AFLOAT32:
+ sym = Runtimepkg.Lookup("f32hash")
+ case AFLOAT64:
+ sym = Runtimepkg.Lookup("f64hash")
+ case ACPLX64:
+ sym = Runtimepkg.Lookup("c64hash")
+ case ACPLX128:
+ sym = Runtimepkg.Lookup("c128hash")
+ default:
+ // Note: the caller of hashfor ensured that this symbol
+ // exists and has a body by calling genhash for t.
+ sym = typesymprefix(".hash", t)
+ }
+
+ n := newname(sym)
+ setNodeNameFunc(n)
+ n.Type = functype(nil, []*Node{
+ anonfield(types.NewPtr(t)),
+ anonfield(types.Types[TUINTPTR]),
+ }, []*Node{
+ anonfield(types.Types[TUINTPTR]),
+ })
+ return n
+}
+
+// sysClosure returns a closure which will call the
+// given runtime function (with no closed-over variables).
+func sysClosure(name string) *obj.LSym {
+ s := sysvar(name + "·f")
+ if len(s.P) == 0 {
+ f := sysfunc(name)
+ dsymptr(s, 0, f, 0)
+ ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+ return s
+}
+
+// geneq returns a symbol which is the closure used to compute
+// equality for two objects of type t.
+func geneq(t *types.Type) *obj.LSym {
+ switch algtype(t) {
+ case ANOEQ:
+ // The runtime will panic if it tries to compare
+ // a type with a nil equality function.
+ return nil
+ case AMEM0:
+ return sysClosure("memequal0")
+ case AMEM8:
+ return sysClosure("memequal8")
+ case AMEM16:
+ return sysClosure("memequal16")
+ case AMEM32:
+ return sysClosure("memequal32")
+ case AMEM64:
+ return sysClosure("memequal64")
+ case AMEM128:
+ return sysClosure("memequal128")
+ case ASTRING:
+ return sysClosure("strequal")
+ case AINTER:
+ return sysClosure("interequal")
+ case ANILINTER:
+ return sysClosure("nilinterequal")
+ case AFLOAT32:
+ return sysClosure("f32equal")
+ case AFLOAT64:
+ return sysClosure("f64equal")
+ case ACPLX64:
+ return sysClosure("c64equal")
+ case ACPLX128:
+ return sysClosure("c128equal")
+ case AMEM:
+ // make equality closure. The size of the type
+ // is encoded in the closure.
+ closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
+ if len(closure.P) != 0 {
+ return closure
+ }
+ if memequalvarlen == nil {
+ memequalvarlen = sysvar("memequal_varlen") // asm func
+ }
+ ot := 0
+ ot = dsymptr(closure, ot, memequalvarlen, 0)
+ ot = duintptr(closure, ot, uint64(t.Width))
+ ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case ASPECIAL:
+ break
+ }
+
+ closure := typesymprefix(".eqfunc", t).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ sym := typesymprefix(".eq", t)
+ if Debug.r != 0 {
+ fmt.Printf("geneq %v\n", t)
+ }
+
+ // Autogenerate code for equality of structs and arrays.
+
+ lineno = autogeneratedPos // less confusing than end of input
+ dclcontext = PEXTERN
+
+ // func sym(p, q *T) bool
+ tfn := nod(OTFUNC, nil, nil)
+ tfn.List.Set2(
+ namedfield("p", types.NewPtr(t)),
+ namedfield("q", types.NewPtr(t)),
+ )
+ tfn.Rlist.Set1(namedfield("r", types.Types[TBOOL]))
+
+ fn := dclfunc(sym, tfn)
+ np := asNode(tfn.Type.Params().Field(0).Nname)
+ nq := asNode(tfn.Type.Params().Field(1).Nname)
+ nr := asNode(tfn.Type.Results().Field(0).Nname)
+
+ // Label to jump to if an equality test fails.
+ neq := autolabel(".neq")
+
+ // We reach here only for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Etype {
+ default:
+ Fatalf("geneq %v", t)
+
+ case TARRAY:
+ nelem := t.NumElem()
+
+ // checkAll generates code to check the equality of all array elements.
+ // If unroll is at least nelem, checkAll generates:
+ //
+ // if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
+ // } else {
+ // goto neq
+ // }
+ //
+ // And so on.
+ //
+ // Otherwise it generates:
+ //
+ // for i := 0; i < nelem; i++ {
+ // if eq(p[i], q[i]) {
+ // } else {
+ // goto neq
+ // }
+ // }
+ //
+ // TODO(josharian): consider doing some loop unrolling
+ // for larger nelem as well, processing a few elements at a time in a loop.
+ checkAll := func(unroll int64, last bool, eq func(pi, qi *Node) *Node) {
+ // checkIdx generates a node to check for equality at index i.
+ checkIdx := func(i *Node) *Node {
+ // pi := p[i]
+ pi := nod(OINDEX, np, i)
+ pi.SetBounded(true)
+ pi.Type = t.Elem()
+ // qi := q[i]
+ qi := nod(OINDEX, nq, i)
+ qi.SetBounded(true)
+ qi.Type = t.Elem()
+ return eq(pi, qi)
+ }
+
+ if nelem <= unroll {
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
+ // Generate a series of checks.
+ for i := int64(0); i < nelem; i++ {
+ // if check {} else { goto neq }
+ nif := nod(OIF, checkIdx(nodintconst(i)), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(nif)
+ }
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, checkIdx(nodintconst(nelem))))
+ }
+ } else {
+ // Generate a for loop.
+ // for i := 0; i < nelem; i++
+ i := temp(types.Types[TINT])
+ init := nod(OAS, i, nodintconst(0))
+ cond := nod(OLT, i, nodintconst(nelem))
+ post := nod(OAS, i, nod(OADD, i, nodintconst(1)))
+ loop := nod(OFOR, cond, post)
+ loop.Ninit.Append(init)
+ // if eq(pi, qi) {} else { goto neq }
+ nif := nod(OIF, checkIdx(i), nil)
+ nif.Rlist.Append(nodSym(OGOTO, nil, neq))
+ loop.Nbody.Append(nif)
+ fn.Nbody.Append(loop)
+ if last {
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ }
+ }
+ }
+
+ switch t.Elem().Etype {
+ case TSTRING:
+ // Do two loops. First, check that all the lengths match (cheap).
+ // Second, check that all the contents match (expensive).
+ // TODO: when the array size is small, unroll the length match checks.
+ checkAll(3, false, func(pi, qi *Node) *Node {
+ // Compare lengths.
+ eqlen, _ := eqstring(pi, qi)
+ return eqlen
+ })
+ checkAll(1, true, func(pi, qi *Node) *Node {
+ // Compare contents.
+ _, eqmem := eqstring(pi, qi)
+ return eqmem
+ })
+ case TFLOAT32, TFLOAT64:
+ checkAll(2, true, func(pi, qi *Node) *Node {
+ // p[i] == q[i]
+ return nod(OEQ, pi, qi)
+ })
+ // TODO: pick apart structs, do them piecemeal too
+ default:
+ checkAll(1, true, func(pi, qi *Node) *Node {
+ // p[i] == q[i]
+ return nod(OEQ, pi, qi)
+ })
+ }
+
+ case TSTRUCT:
+ // Build a list of conditions to satisfy.
+ // The conditions are a list-of-lists. Conditions are reorderable
+ // within each inner list. The outer lists must be evaluated in order.
+ var conds [][]*Node
+ conds = append(conds, []*Node{})
+ and := func(n *Node) {
+ i := len(conds) - 1
+ conds[i] = append(conds[i], n)
+ }
+
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank-named fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ // Compare non-memory fields with field equality.
+ if !IsRegularMemory(f.Type) {
+ if EqCanPanic(f.Type) {
+ // Enforce ordering by starting a new set of reorderable conditions.
+ conds = append(conds, []*Node{})
+ }
+ p := nodSym(OXDOT, np, f.Sym)
+ q := nodSym(OXDOT, nq, f.Sym)
+ switch {
+ case f.Type.IsString():
+ eqlen, eqmem := eqstring(p, q)
+ and(eqlen)
+ and(eqmem)
+ default:
+ and(nod(OEQ, p, q))
+ }
+ if EqCanPanic(f.Type) {
+ // Also enforce ordering after something that can panic.
+ conds = append(conds, []*Node{})
+ }
+ i++
+ continue
+ }
+
+ // Find maximal length run of memory-only fields.
+ size, next := memrun(t, i)
+
+ // TODO(rsc): All the calls to newname are wrong for
+ // cross-package unexported fields.
+ if s := fields[i:next]; len(s) <= 2 {
+ // Two or fewer fields: use plain field equality.
+ for _, f := range s {
+ and(eqfield(np, nq, f.Sym))
+ }
+ } else {
+ // More than two fields: use memequal.
+ and(eqmem(np, nq, f.Sym, size))
+ }
+ i = next
+ }
+
+ // Sort conditions to put runtime calls last.
+ // Preserve the rest of the ordering.
+ var flatConds []*Node
+ for _, c := range conds {
+ isCall := func(n *Node) bool {
+ return n.Op == OCALL || n.Op == OCALLFUNC
+ }
+ sort.SliceStable(c, func(i, j int) bool {
+ return !isCall(c[i]) && isCall(c[j])
+ })
+ flatConds = append(flatConds, c...)
+ }
+
+ if len(flatConds) == 0 {
+ fn.Nbody.Append(nod(OAS, nr, nodbool(true)))
+ } else {
+ for _, c := range flatConds[:len(flatConds)-1] {
+ // if cond {} else { goto neq }
+ n := nod(OIF, c, nil)
+ n.Rlist.Append(nodSym(OGOTO, nil, neq))
+ fn.Nbody.Append(n)
+ }
+ fn.Nbody.Append(nod(OAS, nr, flatConds[len(flatConds)-1]))
+ }
+ }
+
+ // ret:
+ // return
+ ret := autolabel(".ret")
+ fn.Nbody.Append(nodSym(OLABEL, nil, ret))
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
+
+ // neq:
+ // r = false
+ // return (or goto ret)
+ fn.Nbody.Append(nodSym(OLABEL, nil, neq))
+ fn.Nbody.Append(nod(OAS, nr, nodbool(false)))
+ if EqCanPanic(t) || hasCall(fn) {
+ // Epilogue is large, so share it with the equal case.
+ fn.Nbody.Append(nodSym(OGOTO, nil, ret))
+ } else {
+ // Epilogue is small, so don't bother sharing.
+ fn.Nbody.Append(nod(ORETURN, nil, nil))
+ }
+ // TODO(khr): the epilogue size detection condition above isn't perfect.
+ // We should really do a generic CL that shares epilogues across
+ // the board. See #24936.
+
+ if Debug.r != 0 {
+ dumplist("geneq body", fn.Nbody)
+ }
+
+ funcbody()
+
+ fn.Func.SetDupok(true)
+ fn = typecheck(fn, ctxStmt)
+
+ Curfn = fn
+ typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ Curfn = nil
+
+ if debug_dclstack != 0 {
+ testdclstack()
+ }
+
+ // Disable checknils while compiling this code.
+ // We are comparing a struct or an array,
+ // neither of which can be nil, and our comparisons
+ // are shallow.
+ fn.Func.SetNilCheckDisabled(true)
+ xtop = append(xtop, fn)
+
+ // Generate a closure which points at the function we just generated.
+ dsymptr(closure, 0, sym.Linksym(), 0)
+ ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ return closure
+}
+
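+// hasCall reports whether the tree rooted at n contains a function call.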
+func hasCall(n *Node) bool {
+ if n.Op == OCALL || n.Op == OCALLFUNC {
+ return true
+ }
+ if n.Left != nil && hasCall(n.Left) {
+ return true
+ }
+ if n.Right != nil && hasCall(n.Right) {
+ return true
+ }
+ for _, x := range n.Ninit.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Nbody.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.List.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ for _, x := range n.Rlist.Slice() {
+ if hasCall(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// eqfield returns the node
+// p.field == q.field
+func eqfield(p *Node, q *Node, field *types.Sym) *Node {
+ nx := nodSym(OXDOT, p, field)
+ ny := nodSym(OXDOT, q, field)
+ ne := nod(OEQ, nx, ny)
+ return ne
+}
+
+// eqstring returns the nodes
+// len(s) == len(t)
+// and
+// memequal(s.ptr, t.ptr, len(s))
+// which can be used to construct string equality comparison.
+// eqlen must be evaluated before eqmem, and shortcircuiting is required.
+func eqstring(s, t *Node) (eqlen, eqmem *Node) {
+ s = conv(s, types.Types[TSTRING])
+ t = conv(t, types.Types[TSTRING])
+ sptr := nod(OSPTR, s, nil)
+ tptr := nod(OSPTR, t, nil)
+ slen := conv(nod(OLEN, s, nil), types.Types[TUINTPTR])
+ tlen := conv(nod(OLEN, t, nil), types.Types[TUINTPTR])
+
+ fn := syslook("memequal")
+ fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
+ call := nod(OCALL, fn, nil)
+ call.List.Append(sptr, tptr, slen.copy())
+ call = typecheck(call, ctxExpr|ctxMultiOK)
+
+ cmp := nod(OEQ, slen, tlen)
+ cmp = typecheck(cmp, ctxExpr)
+ cmp.Type = types.Types[TBOOL]
+ return cmp, call
+}
+
+// eqinterface returns the nodes
+// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+// and
+// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+// which can be used to construct interface equality comparison.
+// eqtab must be evaluated before eqdata, and shortcircuiting is required.
+func eqinterface(s, t *Node) (eqtab, eqdata *Node) {
+ if !types.Identical(s.Type, t.Type) {
+ Fatalf("eqinterface %v %v", s.Type, t.Type)
+ }
+ // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+ // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+ var fn *Node
+ if s.Type.IsEmptyInterface() {
+ fn = syslook("efaceeq")
+ } else {
+ fn = syslook("ifaceeq")
+ }
+
+ stab := nod(OITAB, s, nil)
+ ttab := nod(OITAB, t, nil)
+ sdata := nod(OIDATA, s, nil)
+ tdata := nod(OIDATA, t, nil)
+ sdata.Type = types.Types[TUNSAFEPTR]
+ tdata.Type = types.Types[TUNSAFEPTR]
+ sdata.SetTypecheck(1)
+ tdata.SetTypecheck(1)
+
+ call := nod(OCALL, fn, nil)
+ call.List.Append(stab, sdata, tdata)
+ call = typecheck(call, ctxExpr|ctxMultiOK)
+
+ cmp := nod(OEQ, stab, ttab)
+ cmp = typecheck(cmp, ctxExpr)
+ cmp.Type = types.Types[TBOOL]
+ return cmp, call
+}
+
+// eqmem returns the node
+// memequal(&p.field, &q.field [, size])
+func eqmem(p *Node, q *Node, field *types.Sym, size int64) *Node {
+ nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
+ ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
+ nx = typecheck(nx, ctxExpr)
+ ny = typecheck(ny, ctxExpr)
+
+ fn, needsize := eqmemfunc(size, nx.Type.Elem())
+ call := nod(OCALL, fn, nil)
+ call.List.Append(nx)
+ call.List.Append(ny)
+ if needsize {
+ call.List.Append(nodintconst(size))
+ }
+
+ return call
+}
+
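+// eqmemfunc returns the memequal variant to call for comparing size bytes of type t,
+// and reports whether the call needs an explicit size argument.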
+func eqmemfunc(size int64, t *types.Type) (fn *Node, needsize bool) {
+ switch size {
+ default:
+ fn = syslook("memequal")
+ needsize = true
+ case 1, 2, 4, 8, 16:
+ buf := fmt.Sprintf("memequal%d", int(size)*8)
+ fn = syslook(buf)
+ }
+
+ fn = substArgTypes(fn, t, t)
+ return fn, needsize
+}
+
+// memrun finds runs of struct fields for which memory-only algs are appropriate.
+// t is the parent struct type, and start is the field index at which to start the run.
+// size is the length in bytes of the memory included in the run.
+// next is the index just after the end of the memory run.
+func memrun(t *types.Type, start int) (size int64, next int) {
+ next = start
+ for {
+ next++
+ if next == t.NumFields() {
+ break
+ }
+ // Stop run after a padded field.
+ if ispaddedfield(t, next-1) {
+ break
+ }
+ // Also, stop before a blank or non-memory field.
+ if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
+ break
+ }
+ }
+ return t.Field(next-1).End() - t.Field(start).Offset, next
+}
+
+// ispaddedfield reports whether the i'th field of struct type t is followed
+// by padding.
+func ispaddedfield(t *types.Type, i int) bool {
+ if !t.IsStruct() {
+ Fatalf("ispaddedfield called non-struct %v", t)
+ }
+ end := t.Width
+ if i+1 < t.NumFields() {
+ end = t.Field(i + 1).Offset
+ }
+ return t.Field(i).End() != end
+}
diff --git a/src/cmd/compile/internal/gc/algkind_string.go b/src/cmd/compile/internal/gc/algkind_string.go
new file mode 100644
index 0000000..52b5399
--- /dev/null
+++ b/src/cmd/compile/internal/gc/algkind_string.go
@@ -0,0 +1,48 @@
+// Code generated by "stringer -type AlgKind -trimprefix A"; DO NOT EDIT.
+
+package gc
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ANOEQ-0]
+ _ = x[AMEM0-1]
+ _ = x[AMEM8-2]
+ _ = x[AMEM16-3]
+ _ = x[AMEM32-4]
+ _ = x[AMEM64-5]
+ _ = x[AMEM128-6]
+ _ = x[ASTRING-7]
+ _ = x[AINTER-8]
+ _ = x[ANILINTER-9]
+ _ = x[AFLOAT32-10]
+ _ = x[AFLOAT64-11]
+ _ = x[ACPLX64-12]
+ _ = x[ACPLX128-13]
+ _ = x[AMEM-100]
+ _ = x[ASPECIAL - -1]
+}
+
+const (
+ _AlgKind_name_0 = "SPECIALNOEQMEM0MEM8MEM16MEM32MEM64MEM128STRINGINTERNILINTERFLOAT32FLOAT64CPLX64CPLX128"
+ _AlgKind_name_1 = "MEM"
+)
+
+var (
+ _AlgKind_index_0 = [...]uint8{0, 7, 11, 15, 19, 24, 29, 34, 40, 46, 51, 59, 66, 73, 79, 86}
+)
+
+func (i AlgKind) String() string {
+ switch {
+ case -1 <= i && i <= 13:
+ i -= -1
+ return _AlgKind_name_0[_AlgKind_index_0[i]:_AlgKind_index_0[i+1]]
+ case i == 100:
+ return _AlgKind_name_1
+ default:
+ return "AlgKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
new file mode 100644
index 0000000..a3a0c8f
--- /dev/null
+++ b/src/cmd/compile/internal/gc/align.go
@@ -0,0 +1,531 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/compile/internal/types"
+ "fmt"
+ "sort"
+)
+
+// sizeCalculationDisabled, when set, indicates that it is not safe
+// to calculate Types' widths and alignments. See dowidth.
+var sizeCalculationDisabled bool
+
+// Machine size and rounding alignment are dictated by
+// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
+var defercalc int
+
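+// Rnd rounds o up to a multiple of r; r must be a power of two in the range [1, 8].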
+func Rnd(o int64, r int64) int64 {
+ if r < 1 || r > 8 || r&(r-1) != 0 {
+ Fatalf("rnd %d", r)
+ }
+ return (o + r - 1) &^ (r - 1)
+}
+
+// expandiface computes the method set for interface type t by
+// expanding embedded interfaces.
+func expandiface(t *types.Type) {
+ seen := make(map[*types.Sym]*types.Field)
+ var methods []*types.Field
+
+ addMethod := func(m *types.Field, explicit bool) {
+ switch prev := seen[m.Sym]; {
+ case prev == nil:
+ seen[m.Sym] = m
+ case langSupported(1, 14, t.Pkg()) && !explicit && types.Identical(m.Type, prev.Type):
+ return
+ default:
+ yyerrorl(m.Pos, "duplicate method %s", m.Sym.Name)
+ }
+ methods = append(methods, m)
+ }
+
+ for _, m := range t.Methods().Slice() {
+ if m.Sym == nil {
+ continue
+ }
+
+ checkwidth(m.Type)
+ addMethod(m, true)
+ }
+
+ for _, m := range t.Methods().Slice() {
+ if m.Sym != nil {
+ continue
+ }
+
+ if !m.Type.IsInterface() {
+ yyerrorl(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ m.SetBroke(true)
+ t.SetBroke(true)
+ // Add to fields so that error messages
+ // include the broken embedded type when
+ // printing t.
+ // TODO(mdempsky): Revisit this.
+ methods = append(methods, m)
+ continue
+ }
+
+ // Embedded interface: duplicate all methods
+ // (including broken ones, if any) and add to t's
+ // method set.
+ for _, t1 := range m.Type.Fields().Slice() {
+ f := types.NewField()
+ f.Pos = m.Pos // preserve embedding position
+ f.Sym = t1.Sym
+ f.Type = t1.Type
+ f.SetBroke(t1.Broke())
+ addMethod(f, false)
+ }
+ }
+
+ sort.Sort(methcmp(methods))
+
+ if int64(len(methods)) >= thearch.MAXWIDTH/int64(Widthptr) {
+ yyerrorl(typePos(t), "interface too large")
+ }
+ for i, m := range methods {
+ m.Offset = int64(i) * int64(Widthptr)
+ }
+
+ // Access fields directly to avoid recursively calling dowidth
+ // within Type.Fields().
+ t.Extra.(*types.Interface).Fields.Set(methods)
+}
+
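+// widstruct lays out the fields of struct type t, starting at offset o.
+// flag gives the minimum alignment and, when nonzero, requests rounding of the final width.
+// It returns the offset just past the laid-out fields; errtype is the type blamed in size errors.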
+func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 {
+ starto := o
+ maxalign := int32(flag)
+ if maxalign < 1 {
+ maxalign = 1
+ }
+ lastzero := int64(0)
+ for _, f := range t.Fields().Slice() {
+ if f.Type == nil {
+ // broken field, just skip it so that other valid fields
+ // get a width.
+ continue
+ }
+
+ dowidth(f.Type)
+ if int32(f.Type.Align) > maxalign {
+ maxalign = int32(f.Type.Align)
+ }
+ if f.Type.Align > 0 {
+ o = Rnd(o, int64(f.Type.Align))
+ }
+ f.Offset = o
+ if n := asNode(f.Nname); n != nil {
+ // addrescapes has similar code to update these offsets.
+ // Usually addrescapes runs after widstruct,
+ // in which case we could drop this,
+ // but function closure functions are the exception.
+ // NOTE(rsc): This comment may be stale.
+ // It's possible the ordering has changed and this is
+ // now the common case. I'm not sure.
+ if n.Name.Param.Stackcopy != nil {
+ n.Name.Param.Stackcopy.Xoffset = o
+ n.Xoffset = 0
+ } else {
+ n.Xoffset = o
+ }
+ }
+
+ w := f.Type.Width
+ if w < 0 {
+ Fatalf("invalid width %d", f.Type.Width)
+ }
+ if w == 0 {
+ lastzero = o
+ }
+ o += w
+ maxwidth := thearch.MAXWIDTH
+ // On 32-bit systems, reflect tables impose an additional constraint
+ // that each field start offset must fit in 31 bits.
+ if maxwidth < 1<<32 {
+ maxwidth = 1<<31 - 1
+ }
+ if o >= maxwidth {
+ yyerrorl(typePos(errtype), "type %L too large", errtype)
+ o = 8 // small but nonzero
+ }
+ }
+
+ // For nonzero-sized structs which end in a zero-sized thing, we add
+ // an extra byte of padding to the type. This padding ensures that
+ // taking the address of the zero-sized thing can't manufacture a
+ // pointer to the next object in the heap. See issue 9401.
+ if flag == 1 && o > starto && o == lastzero {
+ o++
+ }
+
+ // final width is rounded
+ if flag != 0 {
+ o = Rnd(o, int64(maxalign))
+ }
+ t.Align = uint8(maxalign)
+
+ // type width only includes back to first field's offset
+ t.Width = o - starto
+
+ return o
+}
+
+// findTypeLoop searches for an invalid type declaration loop involving
+// type t and reports whether one is found. If so, path contains the
+// loop.
+//
+// path points to a slice used for tracking the sequence of types
+// visited. Using a pointer to a slice allows the slice capacity to
+// grow and limit reallocations.
+func findTypeLoop(t *types.Type, path *[]*types.Type) bool {
+ // We implement a simple DFS loop-finding algorithm. This
+ // could be faster, but type cycles are rare.
+
+ if t.Sym != nil {
+ // Declared type. Check for loops and otherwise
+ // recurse on the type expression used in the type
+ // declaration.
+
+ for i, x := range *path {
+ if x == t {
+ *path = (*path)[i:]
+ return true
+ }
+ }
+
+ *path = append(*path, t)
+ if p := asNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
+ return true
+ }
+ *path = (*path)[:len(*path)-1]
+ } else {
+ // Anonymous type. Recurse on contained types.
+
+ switch t.Etype {
+ case TARRAY:
+ if findTypeLoop(t.Elem(), path) {
+ return true
+ }
+ case TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ if findTypeLoop(f.Type, path) {
+ return true
+ }
+ }
+ case TINTER:
+ for _, m := range t.Methods().Slice() {
+ if m.Type.IsInterface() { // embedded interface
+ if findTypeLoop(m.Type, path) {
+ return true
+ }
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+func reportTypeLoop(t *types.Type) {
+ if t.Broke() {
+ return
+ }
+
+ var l []*types.Type
+ if !findTypeLoop(t, &l) {
+ Fatalf("failed to find type loop for: %v", t)
+ }
+
+ // Rotate loop so that the earliest type declaration is first.
+ i := 0
+ for j, t := range l[1:] {
+ if typePos(t).Before(typePos(l[i])) {
+ i = j + 1
+ }
+ }
+ l = append(l[i:], l[:i]...)
+
+ var msg bytes.Buffer
+ fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
+ for _, t := range l {
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t)
+ t.SetBroke(true)
+ }
+ fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0])
+ yyerrorl(typePos(l[0]), msg.String())
+}
+
+// dowidth calculates and stores the size and alignment for t.
+// If sizeCalculationDisabled is set, and the size/alignment
+// have not already been calculated, it calls Fatal.
+// This is used to prevent data races in the back end.
+func dowidth(t *types.Type) {
+	// Calling dowidth when typecheck tracing is enabled is not safe.
+ // See issue #33658.
+ if enableTrace && skipDowidthForTracing {
+ return
+ }
+ if Widthptr == 0 {
+ Fatalf("dowidth without betypeinit")
+ }
+
+ if t == nil {
+ return
+ }
+
+ if t.Width == -2 {
+ reportTypeLoop(t)
+ t.Width = 0
+ t.Align = 1
+ return
+ }
+
+ if t.WidthCalculated() {
+ return
+ }
+
+ if sizeCalculationDisabled {
+ if t.Broke() {
+ // break infinite recursion from Fatal call below
+ return
+ }
+ t.SetBroke(true)
+ Fatalf("width not calculated: %v", t)
+ }
+
+ // break infinite recursion if the broken recursive type
+ // is referenced again
+ if t.Broke() && t.Width == 0 {
+ return
+ }
+
+ // defer checkwidth calls until after we're done
+ defercheckwidth()
+
+ lno := lineno
+ if asNode(t.Nod) != nil {
+ lineno = asNode(t.Nod).Pos
+ }
+
+ t.Width = -2
+ t.Align = 0 // 0 means use t.Width, below
+
+ et := t.Etype
+ switch et {
+ case TFUNC, TCHAN, TMAP, TSTRING:
+ break
+
+ // simtype == 0 during bootstrap
+ default:
+ if simtype[t.Etype] != 0 {
+ et = simtype[t.Etype]
+ }
+ }
+
+ var w int64
+ switch et {
+ default:
+ Fatalf("dowidth: unknown type: %v", t)
+
+ // compiler-specific stuff
+ case TINT8, TUINT8, TBOOL:
+ // bool is int8
+ w = 1
+
+ case TINT16, TUINT16:
+ w = 2
+
+ case TINT32, TUINT32, TFLOAT32:
+ w = 4
+
+ case TINT64, TUINT64, TFLOAT64:
+ w = 8
+ t.Align = uint8(Widthreg)
+
+ case TCOMPLEX64:
+ w = 8
+ t.Align = 4
+
+ case TCOMPLEX128:
+ w = 16
+ t.Align = uint8(Widthreg)
+
+ case TPTR:
+ w = int64(Widthptr)
+ checkwidth(t.Elem())
+
+ case TUNSAFEPTR:
+ w = int64(Widthptr)
+
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(Widthptr)
+ t.Align = uint8(Widthptr)
+ expandiface(t)
+
+ case TCHAN: // implemented as pointer
+ w = int64(Widthptr)
+
+ checkwidth(t.Elem())
+
+ // make fake type to check later to
+ // trigger channel argument check.
+ t1 := types.NewChanArgs(t)
+ checkwidth(t1)
+
+ case TCHANARGS:
+ t1 := t.ChanArgs()
+ dowidth(t1) // just in case
+ if t1.Elem().Width >= 1<<16 {
+ yyerrorl(typePos(t1), "channel element type too large (>64kB)")
+ }
+ w = 1 // anything will do
+
+ case TMAP: // implemented as pointer
+ w = int64(Widthptr)
+ checkwidth(t.Elem())
+ checkwidth(t.Key())
+
+ case TFORW: // should have been filled in
+ reportTypeLoop(t)
+ w = 1 // anything will do
+
+ case TANY:
+ // dummy type; should be replaced before use.
+ Fatalf("dowidth any")
+
+ case TSTRING:
+ if sizeofString == 0 {
+ Fatalf("early dowidth string")
+ }
+ w = sizeofString
+ t.Align = uint8(Widthptr)
+
+ case TARRAY:
+ if t.Elem() == nil {
+ break
+ }
+
+ dowidth(t.Elem())
+ if t.Elem().Width != 0 {
+ cap := (uint64(thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
+ if uint64(t.NumElem()) > cap {
+ yyerrorl(typePos(t), "type %L larger than address space", t)
+ }
+ }
+ w = t.NumElem() * t.Elem().Width
+ t.Align = t.Elem().Align
+
+ case TSLICE:
+ if t.Elem() == nil {
+ break
+ }
+ w = sizeofSlice
+ checkwidth(t.Elem())
+ t.Align = uint8(Widthptr)
+
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ Fatalf("dowidth fn struct %v", t)
+ }
+ w = widstruct(t, t, 0, 1)
+
+ // make fake type to check later to
+ // trigger function argument computation.
+ case TFUNC:
+ t1 := types.NewFuncArgs(t)
+ checkwidth(t1)
+ w = int64(Widthptr) // width of func type is pointer
+
+	// a function is 3 concatenated structures;
+	// compute their widths as a side effect.
+ case TFUNCARGS:
+ t1 := t.FuncArgs()
+ w = widstruct(t1, t1.Recvs(), 0, 0)
+ w = widstruct(t1, t1.Params(), w, Widthreg)
+ w = widstruct(t1, t1.Results(), w, Widthreg)
+ t1.Extra.(*types.Func).Argwid = w
+ if w%int64(Widthreg) != 0 {
+ Warn("bad type %v %d\n", t1, w)
+ }
+ t.Align = 1
+ }
+
+ if Widthptr == 4 && w != int64(int32(w)) {
+ yyerrorl(typePos(t), "type %v too large", t)
+ }
+
+ t.Width = w
+ if t.Align == 0 {
+ if w == 0 || w > 8 || w&(w-1) != 0 {
+ Fatalf("invalid alignment for %v", t)
+ }
+ t.Align = uint8(w)
+ }
+
+ lineno = lno
+
+ resumecheckwidth()
+}
+
+// when a type's width should be known, we call checkwidth
+// to compute it. during a declaration like
+//
+// type T *struct { next T }
+//
+// it is necessary to defer the calculation of the struct width
+// until after T has been initialized to be a pointer to that struct.
+// similarly, during import processing structs may be used
+// before their definition. in those situations, calling
+// defercheckwidth() stops width calculations until
+// resumecheckwidth() is called, at which point all the
+// checkwidths that were deferred are executed.
+// dowidth should only be called when the type's size
+// is needed immediately. checkwidth makes sure the
+// size is evaluated eventually.
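+//
+// A typical pattern looks like this (an illustrative sketch, not code
+// from this change):
+//
+//	defercheckwidth()
+//	// ... process declarations that may reference each other ...
+//	resumecheckwidth()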
+
+var deferredTypeStack []*types.Type
+
+func checkwidth(t *types.Type) {
+ if t == nil {
+ return
+ }
+
+ // function arg structs should not be checked
+ // outside of the enclosing function.
+ if t.IsFuncArgStruct() {
+ Fatalf("checkwidth %v", t)
+ }
+
+ if defercalc == 0 {
+ dowidth(t)
+ return
+ }
+
+	// if the type has not been pushed on deferredTypeStack yet, do it now
+ if !t.Deferwidth() {
+ t.SetDeferwidth(true)
+ deferredTypeStack = append(deferredTypeStack, t)
+ }
+}
+
+func defercheckwidth() {
+ defercalc++
+}
+
+func resumecheckwidth() {
+ if defercalc == 1 {
+ for len(deferredTypeStack) > 0 {
+ t := deferredTypeStack[len(deferredTypeStack)-1]
+ deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
+ t.SetDeferwidth(false)
+ dowidth(t)
+ }
+ }
+
+ defercalc--
+}
diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/gc/bench_test.go
new file mode 100644
index 0000000..8c42881
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bench_test.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "testing"
+
+var globl int64
+var globl32 int32
+
+func BenchmarkLoadAdd(b *testing.B) {
+ x := make([]int64, 1024)
+ y := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= x[i] + y[i]
+ }
+ globl = s
+ }
+}
+
+// Added for ppc64 extswsli on power9
+func BenchmarkExtShift(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= int64(x[i]+32) * 8
+ }
+ globl = s
+ }
+}
+
+func BenchmarkModify(b *testing.B) {
+ a := make([]int64, 1024)
+ v := globl
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += v
+ }
+ }
+}
+
+func BenchmarkMullImm(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int32
+ for i := range x {
+ s += x[i] * 100
+ }
+ globl32 = s
+ }
+}
+
+func BenchmarkConstModify(b *testing.B) {
+ a := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += 3
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
new file mode 100644
index 0000000..10f21f8
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -0,0 +1,177 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+)
+
+type exporter struct {
+ marked map[*types.Type]bool // types already seen by markType
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *exporter) markType(t *types.Type) {
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a named type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym != nil && t.Etype != TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markType(m.Type)
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
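+	//
+	// For example (a hypothetical type, not taken from this change): for
+	//
+	//	type T struct{ C chan *U }
+	//
+	// marking T also marks chan *U, *U, and U via the cases below, while
+	// for an exported method func (T) M(p P) (r R) only the result type R
+	// is marked, not the parameter type P.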
+ switch t.Etype {
+ case TPTR, TARRAY, TSLICE:
+ p.markType(t.Elem())
+
+ case TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case TFUNC:
+ // If t is the type of a function or method, then
+ // t.Nname() is its ONAME. Mark its inline body and
+ // any recursively called functions for export.
+ inlFlood(asNode(t.Nname()))
+
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case TINTER:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predecl []*types.Type // initialized lazily
+
+func predeclared() []*types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+		// elements have been initialized before they are used
+ predecl = []*types.Type{
+ // basic types
+ types.Types[TBOOL],
+ types.Types[TINT],
+ types.Types[TINT8],
+ types.Types[TINT16],
+ types.Types[TINT32],
+ types.Types[TINT64],
+ types.Types[TUINT],
+ types.Types[TUINT8],
+ types.Types[TUINT16],
+ types.Types[TUINT32],
+ types.Types[TUINT64],
+ types.Types[TUINTPTR],
+ types.Types[TFLOAT32],
+ types.Types[TFLOAT64],
+ types.Types[TCOMPLEX64],
+ types.Types[TCOMPLEX128],
+ types.Types[TSTRING],
+
+ // basic type aliases
+ types.Bytetype,
+ types.Runetype,
+
+ // error
+ types.Errortype,
+
+ // untyped types
+ types.UntypedBool,
+ types.UntypedInt,
+ types.UntypedRune,
+ types.UntypedFloat,
+ types.UntypedComplex,
+ types.UntypedString,
+ types.Types[TNIL],
+
+ // package unsafe
+ types.Types[TUNSAFEPTR],
+
+ // invalid type (package contains errors)
+ types.Types[Txxx],
+
+ // any type, for builtin export data
+ types.Types[TANY],
+ }
+ }
+ return predecl
+}
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
new file mode 100644
index 0000000..911ac4c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/src"
+)
+
+// numImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) when multiple packages with the same name
+// appear in an error message.
+var numImport = make(map[string]int)
+
+func npos(pos src.XPos, n *Node) *Node {
+ n.Pos = pos
+ return n
+}
+
+func builtinCall(op Op) *Node {
+ return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
+}
diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/gc/bitset.go
new file mode 100644
index 0000000..ed5eea0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bitset.go
@@ -0,0 +1,59 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
+
+type bitset16 uint16
+
+func (f *bitset16) set(mask uint16, b bool) {
+ if b {
+ *(*uint16)(f) |= mask
+ } else {
+ *(*uint16)(f) &^= mask
+ }
+}
+
+type bitset32 uint32
+
+func (f *bitset32) set(mask uint32, b bool) {
+ if b {
+ *(*uint32)(f) |= mask
+ } else {
+ *(*uint32)(f) &^= mask
+ }
+}
+
+func (f bitset32) get2(shift uint8) uint8 {
+ return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset32) set2(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint32)(f) &^= 3 << shift
+ // Set new bits.
+ *(*uint32)(f) |= uint32(b&3) << shift
+}
+
+func (f bitset32) get3(shift uint8) uint8 {
+ return uint8(f>>shift) & 7
+}
+
+// set3 sets three bits in f using the bottom three bits of b.
+func (f *bitset32) set3(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint32)(f) &^= 7 << shift
+ // Set new bits.
+ *(*uint32)(f) |= uint32(b&7) << shift
+}
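+
+// For illustration (a hypothetical usage sketch, not part of this change):
+//
+//	var f bitset32
+//	f.set2(4, 3)  // store the value 3 in bits 4-5
+//	_ = f.get2(4) // reads back 3
+//	f.set3(8, 5)  // store the value 5 in bits 8-10
+//	_ = f.get3(8) // reads back 5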
diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go
new file mode 100644
index 0000000..967f75a
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bootstrap.go
@@ -0,0 +1,13 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package gc
+
+import "runtime"
+
+func startMutexProfiling() {
+ Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+}
diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
new file mode 100644
index 0000000..e04f23e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -0,0 +1,340 @@
+// Code generated by mkbuiltin.go. DO NOT EDIT.
+
+package gc
+
+import "cmd/compile/internal/types"
+
+var runtimeDecls = [...]struct {
+ name string
+ tag int
+ typ int
+}{
+ {"newobject", funcTag, 4},
+ {"mallocgc", funcTag, 8},
+ {"panicdivide", funcTag, 9},
+ {"panicshift", funcTag, 9},
+ {"panicmakeslicelen", funcTag, 9},
+ {"panicmakeslicecap", funcTag, 9},
+ {"throwinit", funcTag, 9},
+ {"panicwrap", funcTag, 9},
+ {"gopanic", funcTag, 11},
+ {"gorecover", funcTag, 14},
+ {"goschedguarded", funcTag, 9},
+ {"goPanicIndex", funcTag, 16},
+ {"goPanicIndexU", funcTag, 18},
+ {"goPanicSliceAlen", funcTag, 16},
+ {"goPanicSliceAlenU", funcTag, 18},
+ {"goPanicSliceAcap", funcTag, 16},
+ {"goPanicSliceAcapU", funcTag, 18},
+ {"goPanicSliceB", funcTag, 16},
+ {"goPanicSliceBU", funcTag, 18},
+ {"goPanicSlice3Alen", funcTag, 16},
+ {"goPanicSlice3AlenU", funcTag, 18},
+ {"goPanicSlice3Acap", funcTag, 16},
+ {"goPanicSlice3AcapU", funcTag, 18},
+ {"goPanicSlice3B", funcTag, 16},
+ {"goPanicSlice3BU", funcTag, 18},
+ {"goPanicSlice3C", funcTag, 16},
+ {"goPanicSlice3CU", funcTag, 18},
+ {"printbool", funcTag, 19},
+ {"printfloat", funcTag, 21},
+ {"printint", funcTag, 23},
+ {"printhex", funcTag, 25},
+ {"printuint", funcTag, 25},
+ {"printcomplex", funcTag, 27},
+ {"printstring", funcTag, 29},
+ {"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
+ {"printiface", funcTag, 30},
+ {"printeface", funcTag, 30},
+ {"printslice", funcTag, 30},
+ {"printnl", funcTag, 9},
+ {"printsp", funcTag, 9},
+ {"printlock", funcTag, 9},
+ {"printunlock", funcTag, 9},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convI2I", funcTag, 57},
+ {"convT16", funcTag, 58},
+ {"convT32", funcTag, 58},
+ {"convT64", funcTag, 58},
+ {"convTstring", funcTag, 58},
+ {"convTslice", funcTag, 58},
+ {"convT2E", funcTag, 59},
+ {"convT2Enoptr", funcTag, 59},
+ {"convT2I", funcTag, 59},
+ {"convT2Inoptr", funcTag, 59},
+ {"assertE2I", funcTag, 57},
+ {"assertE2I2", funcTag, 60},
+ {"assertI2I", funcTag, 57},
+ {"assertI2I2", funcTag, 60},
+ {"panicdottypeE", funcTag, 61},
+ {"panicdottypeI", funcTag, 61},
+ {"panicnildottype", funcTag, 62},
+ {"ifaceeq", funcTag, 64},
+ {"efaceeq", funcTag, 64},
+ {"fastrand", funcTag, 66},
+ {"makemap64", funcTag, 68},
+ {"makemap", funcTag, 69},
+ {"makemap_small", funcTag, 70},
+ {"mapaccess1", funcTag, 71},
+ {"mapaccess1_fast32", funcTag, 72},
+ {"mapaccess1_fast64", funcTag, 72},
+ {"mapaccess1_faststr", funcTag, 72},
+ {"mapaccess1_fat", funcTag, 73},
+ {"mapaccess2", funcTag, 74},
+ {"mapaccess2_fast32", funcTag, 75},
+ {"mapaccess2_fast64", funcTag, 75},
+ {"mapaccess2_faststr", funcTag, 75},
+ {"mapaccess2_fat", funcTag, 76},
+ {"mapassign", funcTag, 71},
+ {"mapassign_fast32", funcTag, 72},
+ {"mapassign_fast32ptr", funcTag, 72},
+ {"mapassign_fast64", funcTag, 72},
+ {"mapassign_fast64ptr", funcTag, 72},
+ {"mapassign_faststr", funcTag, 72},
+ {"mapiterinit", funcTag, 77},
+ {"mapdelete", funcTag, 77},
+ {"mapdelete_fast32", funcTag, 78},
+ {"mapdelete_fast64", funcTag, 78},
+ {"mapdelete_faststr", funcTag, 78},
+ {"mapiternext", funcTag, 79},
+ {"mapclear", funcTag, 80},
+ {"makechan64", funcTag, 82},
+ {"makechan", funcTag, 83},
+ {"chanrecv1", funcTag, 85},
+ {"chanrecv2", funcTag, 86},
+ {"chansend1", funcTag, 88},
+ {"closechan", funcTag, 30},
+ {"writeBarrier", varTag, 90},
+ {"typedmemmove", funcTag, 91},
+ {"typedmemclr", funcTag, 92},
+ {"typedslicecopy", funcTag, 93},
+ {"selectnbsend", funcTag, 94},
+ {"selectnbrecv", funcTag, 95},
+ {"selectnbrecv2", funcTag, 97},
+ {"selectsetpc", funcTag, 98},
+ {"selectgo", funcTag, 99},
+ {"block", funcTag, 9},
+ {"makeslice", funcTag, 100},
+ {"makeslice64", funcTag, 101},
+ {"makeslicecopy", funcTag, 102},
+ {"growslice", funcTag, 104},
+ {"memmove", funcTag, 105},
+ {"memclrNoHeapPointers", funcTag, 106},
+ {"memclrHasPointers", funcTag, 106},
+ {"memequal", funcTag, 107},
+ {"memequal0", funcTag, 108},
+ {"memequal8", funcTag, 108},
+ {"memequal16", funcTag, 108},
+ {"memequal32", funcTag, 108},
+ {"memequal64", funcTag, 108},
+ {"memequal128", funcTag, 108},
+ {"f32equal", funcTag, 109},
+ {"f64equal", funcTag, 109},
+ {"c64equal", funcTag, 109},
+ {"c128equal", funcTag, 109},
+ {"strequal", funcTag, 109},
+ {"interequal", funcTag, 109},
+ {"nilinterequal", funcTag, 109},
+ {"memhash", funcTag, 110},
+ {"memhash0", funcTag, 111},
+ {"memhash8", funcTag, 111},
+ {"memhash16", funcTag, 111},
+ {"memhash32", funcTag, 111},
+ {"memhash64", funcTag, 111},
+ {"memhash128", funcTag, 111},
+ {"f32hash", funcTag, 111},
+ {"f64hash", funcTag, 111},
+ {"c64hash", funcTag, 111},
+ {"c128hash", funcTag, 111},
+ {"strhash", funcTag, 111},
+ {"interhash", funcTag, 111},
+ {"nilinterhash", funcTag, 111},
+ {"int64div", funcTag, 112},
+ {"uint64div", funcTag, 113},
+ {"int64mod", funcTag, 112},
+ {"uint64mod", funcTag, 113},
+ {"float64toint64", funcTag, 114},
+ {"float64touint64", funcTag, 115},
+ {"float64touint32", funcTag, 116},
+ {"int64tofloat64", funcTag, 117},
+ {"uint64tofloat64", funcTag, 118},
+ {"uint32tofloat64", funcTag, 119},
+ {"complex128div", funcTag, 120},
+ {"racefuncenter", funcTag, 31},
+ {"racefuncenterfp", funcTag, 9},
+ {"racefuncexit", funcTag, 9},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
+ {"racereadrange", funcTag, 121},
+ {"racewriterange", funcTag, 121},
+ {"msanread", funcTag, 121},
+ {"msanwrite", funcTag, 121},
+ {"msanmove", funcTag, 122},
+ {"checkptrAlignment", funcTag, 123},
+ {"checkptrArithmetic", funcTag, 125},
+ {"libfuzzerTraceCmp1", funcTag, 127},
+ {"libfuzzerTraceCmp2", funcTag, 129},
+ {"libfuzzerTraceCmp4", funcTag, 130},
+ {"libfuzzerTraceCmp8", funcTag, 131},
+ {"libfuzzerTraceConstCmp1", funcTag, 127},
+ {"libfuzzerTraceConstCmp2", funcTag, 129},
+ {"libfuzzerTraceConstCmp4", funcTag, 130},
+ {"libfuzzerTraceConstCmp8", funcTag, 131},
+ {"x86HasPOPCNT", varTag, 6},
+ {"x86HasSSE41", varTag, 6},
+ {"x86HasFMA", varTag, 6},
+ {"armHasVFPv4", varTag, 6},
+ {"arm64HasATOMICS", varTag, 6},
+}
+
+func runtimeTypes() []*types.Type {
+ var typs [132]*types.Type
+ typs[0] = types.Bytetype
+ typs[1] = types.NewPtr(typs[0])
+ typs[2] = types.Types[TANY]
+ typs[3] = types.NewPtr(typs[2])
+ typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
+ typs[5] = types.Types[TUINTPTR]
+ typs[6] = types.Types[TBOOL]
+ typs[7] = types.Types[TUNSAFEPTR]
+ typs[8] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*Node{anonfield(typs[7])})
+ typs[9] = functype(nil, nil, nil)
+ typs[10] = types.Types[TINTER]
+ typs[11] = functype(nil, []*Node{anonfield(typs[10])}, nil)
+ typs[12] = types.Types[TINT32]
+ typs[13] = types.NewPtr(typs[12])
+ typs[14] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[10])})
+ typs[15] = types.Types[TINT]
+ typs[16] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
+ typs[17] = types.Types[TUINT]
+ typs[18] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
+ typs[19] = functype(nil, []*Node{anonfield(typs[6])}, nil)
+ typs[20] = types.Types[TFLOAT64]
+ typs[21] = functype(nil, []*Node{anonfield(typs[20])}, nil)
+ typs[22] = types.Types[TINT64]
+ typs[23] = functype(nil, []*Node{anonfield(typs[22])}, nil)
+ typs[24] = types.Types[TUINT64]
+ typs[25] = functype(nil, []*Node{anonfield(typs[24])}, nil)
+ typs[26] = types.Types[TCOMPLEX128]
+ typs[27] = functype(nil, []*Node{anonfield(typs[26])}, nil)
+ typs[28] = types.Types[TSTRING]
+ typs[29] = functype(nil, []*Node{anonfield(typs[28])}, nil)
+ typs[30] = functype(nil, []*Node{anonfield(typs[2])}, nil)
+ typs[31] = functype(nil, []*Node{anonfield(typs[5])}, nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[28])})
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[38])}, []*Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = functype(nil, []*Node{anonfield(typs[42]), anonfield(typs[22])}, []*Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[28])})
+ typs[46] = types.Runetype
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[47])}, []*Node{anonfield(typs[28])})
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = functype(nil, []*Node{anonfield(typs[33]), anonfield(typs[28])}, []*Node{anonfield(typs[49])})
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = functype(nil, []*Node{anonfield(typs[52]), anonfield(typs[28])}, []*Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
+ typs[63] = types.NewPtr(typs[5])
+ typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[65] = types.Types[TUINT32]
+ typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
+ typs[67] = types.NewMap(typs[2], typs[2])
+ typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[81] = types.NewChan(typs[2], types.Cboth)
+ typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
+ typs[84] = types.NewChan(typs[2], types.Crecv)
+ typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[87] = types.NewChan(typs[2], types.Csend)
+ typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[89] = types.NewArray(typs[0], 3)
+ typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[96] = types.NewPtr(typs[6])
+ typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []*Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
+ typs[103] = types.NewSlice(typs[2])
+ typs[104] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[5])}, []*Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []*Node{anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
+ typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[122] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[123] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
+ typs[124] = types.NewSlice(typs[7])
+ typs[125] = functype(nil, []*Node{anonfield(typs[7]), anonfield(typs[124])}, nil)
+ typs[126] = types.Types[TUINT8]
+ typs[127] = functype(nil, []*Node{anonfield(typs[126]), anonfield(typs[126])}, nil)
+ typs[128] = types.Types[TUINT16]
+ typs[129] = functype(nil, []*Node{anonfield(typs[128]), anonfield(typs[128])}, nil)
+ typs[130] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
+ typs[131] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
+ return typs[:]
+}
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
new file mode 100644
index 0000000..acb69c7
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -0,0 +1,259 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go. This is not done automatically
+// to avoid depending on having a working compiler binary.
+
+// +build ignore
+
+package runtime
+
+// emitted by compiler, not referred to by go programs
+
+import "unsafe"
+
+func newobject(typ *byte) *any
+func mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+func panicdivide()
+func panicshift()
+func panicmakeslicelen()
+func panicmakeslicecap()
+func throwinit()
+func panicwrap()
+
+func gopanic(interface{})
+func gorecover(*int32) interface{}
+func goschedguarded()
+
+// Note: these declarations are just for the wasm port.
+// Other ports call assembly stubs instead.
+func goPanicIndex(x int, y int)
+func goPanicIndexU(x uint, y int)
+func goPanicSliceAlen(x int, y int)
+func goPanicSliceAlenU(x uint, y int)
+func goPanicSliceAcap(x int, y int)
+func goPanicSliceAcapU(x uint, y int)
+func goPanicSliceB(x int, y int)
+func goPanicSliceBU(x uint, y int)
+func goPanicSlice3Alen(x int, y int)
+func goPanicSlice3AlenU(x uint, y int)
+func goPanicSlice3Acap(x int, y int)
+func goPanicSlice3AcapU(x uint, y int)
+func goPanicSlice3B(x int, y int)
+func goPanicSlice3BU(x uint, y int)
+func goPanicSlice3C(x int, y int)
+func goPanicSlice3CU(x uint, y int)
+
+func printbool(bool)
+func printfloat(float64)
+func printint(int64)
+func printhex(uint64)
+func printuint(uint64)
+func printcomplex(complex128)
+func printstring(string)
+func printpointer(any)
+func printuintptr(uintptr)
+func printiface(any)
+func printeface(any)
+func printslice(any)
+func printnl()
+func printsp()
+func printlock()
+func printunlock()
+
+func concatstring2(*[32]byte, string, string) string
+func concatstring3(*[32]byte, string, string, string) string
+func concatstring4(*[32]byte, string, string, string, string) string
+func concatstring5(*[32]byte, string, string, string, string, string) string
+func concatstrings(*[32]byte, []string) string
+
+func cmpstring(string, string) int
+func intstring(*[4]byte, int64) string
+func slicebytetostring(buf *[32]byte, ptr *byte, n int) string
+func slicebytetostringtmp(ptr *byte, n int) string
+func slicerunetostring(*[32]byte, []rune) string
+func stringtoslicebyte(*[32]byte, string) []byte
+func stringtoslicerune(*[32]rune, string) []rune
+func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) int
+
+func decoderune(string, int) (retv rune, retk int)
+func countrunes(string) int
+
+// Non-empty-interface to non-empty-interface conversion.
+func convI2I(typ *byte, elem any) (ret any)
+
+// Specialized type-to-interface conversion.
+// These return only a data pointer.
+func convT16(val any) unsafe.Pointer // val must be uint16-like (same size and alignment as a uint16)
+func convT32(val any) unsafe.Pointer // val must be uint32-like (same size and alignment as a uint32)
+func convT64(val any) unsafe.Pointer // val must be uint64-like (same size and alignment as a uint64 and contains no pointers)
+func convTstring(val any) unsafe.Pointer // val must be a string
+func convTslice(val any) unsafe.Pointer // val must be a slice
+
+// Type to empty-interface conversion.
+func convT2E(typ *byte, elem *any) (ret any)
+func convT2Enoptr(typ *byte, elem *any) (ret any)
+
+// Type to non-empty-interface conversion.
+func convT2I(tab *byte, elem *any) (ret any)
+func convT2Inoptr(tab *byte, elem *any) (ret any)
+
+// interface type assertions x.(T)
+func assertE2I(typ *byte, iface any) (ret any)
+func assertE2I2(typ *byte, iface any) (ret any, b bool)
+func assertI2I(typ *byte, iface any) (ret any)
+func assertI2I2(typ *byte, iface any) (ret any, b bool)
+func panicdottypeE(have, want, iface *byte)
+func panicdottypeI(have, want, iface *byte)
+func panicnildottype(want *byte)
+
+// interface equality. Type/itab pointers are already known to be equal, so
+// we only need to pass one.
+func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+
+func fastrand() uint32
+
+// *byte is really *runtime.Type
+func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
+func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any)
+func makemap_small() (hmap map[any]any)
+func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess1_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any)
+func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool)
+func mapaccess2_fast32(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
+func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
+func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
+func mapdelete(mapType *byte, hmap map[any]any, key *any)
+func mapdelete_fast32(mapType *byte, hmap map[any]any, key any)
+func mapdelete_fast64(mapType *byte, hmap map[any]any, key any)
+func mapdelete_faststr(mapType *byte, hmap map[any]any, key any)
+func mapiternext(hiter *any)
+func mapclear(mapType *byte, hmap map[any]any)
+
+// *byte is really *runtime.Type
+func makechan64(chanType *byte, size int64) (hchan chan any)
+func makechan(chanType *byte, size int) (hchan chan any)
+func chanrecv1(hchan <-chan any, elem *any)
+func chanrecv2(hchan <-chan any, elem *any) bool
+func chansend1(hchan chan<- any, elem *any)
+func closechan(hchan any)
+
+var writeBarrier struct {
+ enabled bool
+ pad [3]byte
+ needed bool
+ cgo bool
+ alignme uint64
+}
+
+// *byte is really *runtime.Type
+func typedmemmove(typ *byte, dst *any, src *any)
+func typedmemclr(typ *byte, dst *any)
+func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+
+func selectnbsend(hchan chan<- any, elem *any) bool
+func selectnbrecv(elem *any, hchan <-chan any) bool
+func selectnbrecv2(elem *any, received *bool, hchan <-chan any) bool
+
+func selectsetpc(pc *uintptr)
+func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)
+func block()
+
+func makeslice(typ *byte, len int, cap int) unsafe.Pointer
+func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer
+func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+func growslice(typ *byte, old []any, cap int) (ary []any)
+func memmove(to *any, frm *any, length uintptr)
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
+
+func memequal(x, y *any, size uintptr) bool
+func memequal0(x, y *any) bool
+func memequal8(x, y *any) bool
+func memequal16(x, y *any) bool
+func memequal32(x, y *any) bool
+func memequal64(x, y *any) bool
+func memequal128(x, y *any) bool
+func f32equal(p, q unsafe.Pointer) bool
+func f64equal(p, q unsafe.Pointer) bool
+func c64equal(p, q unsafe.Pointer) bool
+func c128equal(p, q unsafe.Pointer) bool
+func strequal(p, q unsafe.Pointer) bool
+func interequal(p, q unsafe.Pointer) bool
+func nilinterequal(p, q unsafe.Pointer) bool
+
+func memhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr
+func memhash0(p unsafe.Pointer, h uintptr) uintptr
+func memhash8(p unsafe.Pointer, h uintptr) uintptr
+func memhash16(p unsafe.Pointer, h uintptr) uintptr
+func memhash32(p unsafe.Pointer, h uintptr) uintptr
+func memhash64(p unsafe.Pointer, h uintptr) uintptr
+func memhash128(p unsafe.Pointer, h uintptr) uintptr
+func f32hash(p unsafe.Pointer, h uintptr) uintptr
+func f64hash(p unsafe.Pointer, h uintptr) uintptr
+func c64hash(p unsafe.Pointer, h uintptr) uintptr
+func c128hash(p unsafe.Pointer, h uintptr) uintptr
+func strhash(a unsafe.Pointer, h uintptr) uintptr
+func interhash(p unsafe.Pointer, h uintptr) uintptr
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
+
+// only used on 32-bit
+func int64div(int64, int64) int64
+func uint64div(uint64, uint64) uint64
+func int64mod(int64, int64) int64
+func uint64mod(uint64, uint64) uint64
+func float64toint64(float64) int64
+func float64touint64(float64) uint64
+func float64touint32(float64) uint32
+func int64tofloat64(int64) float64
+func uint64tofloat64(uint64) float64
+func uint32tofloat64(uint32) float64
+
+func complex128div(num complex128, den complex128) (quo complex128)
+
+// race detection
+func racefuncenter(uintptr)
+func racefuncenterfp()
+func racefuncexit()
+func raceread(uintptr)
+func racewrite(uintptr)
+func racereadrange(addr, size uintptr)
+func racewriterange(addr, size uintptr)
+
+// memory sanitizer
+func msanread(addr, size uintptr)
+func msanwrite(addr, size uintptr)
+func msanmove(dst, src, size uintptr)
+
+func checkptrAlignment(unsafe.Pointer, *byte, uintptr)
+func checkptrArithmetic(unsafe.Pointer, []unsafe.Pointer)
+
+func libfuzzerTraceCmp1(uint8, uint8)
+func libfuzzerTraceCmp2(uint16, uint16)
+func libfuzzerTraceCmp4(uint32, uint32)
+func libfuzzerTraceCmp8(uint64, uint64)
+func libfuzzerTraceConstCmp1(uint8, uint8)
+func libfuzzerTraceConstCmp2(uint16, uint16)
+func libfuzzerTraceConstCmp4(uint32, uint32)
+func libfuzzerTraceConstCmp8(uint64, uint64)
+
+// architecture variants
+var x86HasPOPCNT bool
+var x86HasSSE41 bool
+var x86HasFMA bool
+var armHasVFPv4 bool
+var arm64HasATOMICS bool
diff --git a/src/cmd/compile/internal/gc/builtin_test.go b/src/cmd/compile/internal/gc/builtin_test.go
new file mode 100644
index 0000000..57f24b2
--- /dev/null
+++ b/src/cmd/compile/internal/gc/builtin_test.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc_test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os/exec"
+ "testing"
+)
+
+func TestBuiltin(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ old, err := ioutil.ReadFile("builtin.go")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ new, err := exec.Command(testenv.GoToolPath(t), "run", "mkbuiltin.go", "-stdout").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(old, new) {
+ t.Fatal("builtin.go out of date; run mkbuiltin.go")
+ }
+}
diff --git a/src/cmd/compile/internal/gc/bv.go b/src/cmd/compile/internal/gc/bv.go
new file mode 100644
index 0000000..e32ab97
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bv.go
@@ -0,0 +1,278 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "math/bits"
+)
+
+const (
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
+)
+
+// A bvec is a bit vector.
+type bvec struct {
+ n int32 // number of bits in vector
+ b []uint32 // words holding bits
+}
+
+func bvalloc(n int32) bvec {
+ nword := (n + wordBits - 1) / wordBits
+ return bvec{n, make([]uint32, nword)}
+}
+
+type bulkBvec struct {
+ words []uint32
+ nbit int32
+ nword int32
+}
+
+func bvbulkalloc(nbit int32, count int32) bulkBvec {
+ nword := (nbit + wordBits - 1) / wordBits
+ size := int64(nword) * int64(count)
+ if int64(int32(size*4)) != size*4 {
+ Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ }
+ return bulkBvec{
+ words: make([]uint32, size),
+ nbit: nbit,
+ nword: nword,
+ }
+}
+
+func (b *bulkBvec) next() bvec {
+ out := bvec{b.nbit, b.words[:b.nword]}
+ b.words = b.words[b.nword:]
+ return out
+}
+
+func (bv1 bvec) Eq(bv2 bvec) bool {
+ if bv1.n != bv2.n {
+ Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+ }
+ for i, x := range bv1.b {
+ if x != bv2.b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (dst bvec) Copy(src bvec) {
+ copy(dst.b, src.b)
+}
+
+func (bv bvec) Get(i int32) bool {
+ if i < 0 || i >= bv.n {
+ Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.b[i>>wordShift]&mask != 0
+}
+
+func (bv bvec) Set(i int32) {
+ if i < 0 || i >= bv.n {
+ Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.b[i/wordBits] |= mask
+}
+
+func (bv bvec) Unset(i int32) {
+ if i < 0 || i >= bv.n {
+ Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.b[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index >= i for which bv.Get(i) is true.
+// If there is no such index, Next returns -1.
+func (bv bvec) Next(i int32) int32 {
+ if i >= bv.n {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.n && bv.b[i>>wordShift] == 0 {
+ i += wordBits
+ }
+ }
+
+ if i >= bv.n {
+ return -1
+ }
+
+ // Find 1 bit.
+ w := bv.b[i>>wordShift] >> uint(i&wordMask)
+ i += int32(bits.TrailingZeros32(w))
+
+ return i
+}
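+
+// A typical iteration over the set bits of a bvec looks like this
+// (an illustrative sketch, not code from this change):
+//
+//	for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
+//		// bit i is set
+//	}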
+
+func (bv bvec) IsEmpty() bool {
+ for _, x := range bv.b {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bv bvec) Not() {
+ for i, x := range bv.b {
+ bv.b[i] = ^x
+ }
+}
+
+// union
+func (dst bvec) Or(src1, src2 bvec) {
+ if len(src1.b) == 0 {
+ return
+ }
+ _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.b {
+ dst.b[i] = x | src2.b[i]
+ }
+}
+
+// intersection
+func (dst bvec) And(src1, src2 bvec) {
+ if len(src1.b) == 0 {
+ return
+ }
+ _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.b {
+ dst.b[i] = x & src2.b[i]
+ }
+}
+
+// difference
+func (dst bvec) AndNot(src1, src2 bvec) {
+ if len(src1.b) == 0 {
+ return
+ }
+ _, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.b {
+ dst.b[i] = x &^ src2.b[i]
+ }
+}
+
+func (bv bvec) String() string {
+ s := make([]byte, 2+bv.n)
+ copy(s, "#*")
+ for i := int32(0); i < bv.n; i++ {
+ ch := byte('0')
+ if bv.Get(i) {
+ ch = '1'
+ }
+ s[2+i] = ch
+ }
+ return string(s)
+}
+
+func (bv bvec) Clear() {
+ for i := range bv.b {
+ bv.b[i] = 0
+ }
+}
+
+// FNV-1 hash function constants.
+const (
+ H0 = 2166136261
+ Hp = 16777619
+)
+
+func hashbitmap(h uint32, bv bvec) uint32 {
+ n := int((bv.n + 31) / 32)
+ for i := 0; i < n; i++ {
+ w := bv.b[i]
+ h = (h * Hp) ^ (w & 0xff)
+ h = (h * Hp) ^ ((w >> 8) & 0xff)
+ h = (h * Hp) ^ ((w >> 16) & 0xff)
+ h = (h * Hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
+
+// bvecSet is a set of bvecs, in initial insertion order.
+type bvecSet struct {
+ index []int // hash -> uniq index. -1 indicates empty slot.
+ uniq []bvec // unique bvecs, in insertion order
+}
+
+func (m *bvecSet) grow() {
+ // Allocate new index.
+ n := len(m.index) * 2
+ if n == 0 {
+ n = 32
+ }
+ newIndex := make([]int, n)
+ for i := range newIndex {
+ newIndex[i] = -1
+ }
+
+ // Rehash into newIndex.
+ for i, bv := range m.uniq {
+ h := hashbitmap(H0, bv) % uint32(len(newIndex))
+ for {
+ j := newIndex[h]
+ if j < 0 {
+ newIndex[h] = i
+ break
+ }
+ h++
+ if h == uint32(len(newIndex)) {
+ h = 0
+ }
+ }
+ }
+ m.index = newIndex
+}
+
+// add adds bv to the set and returns its index in m.extractUniqe.
+// The caller must not modify bv after this.
+func (m *bvecSet) add(bv bvec) int {
+ if len(m.uniq)*4 >= len(m.index) {
+ m.grow()
+ }
+
+ index := m.index
+ h := hashbitmap(H0, bv) % uint32(len(index))
+ for {
+ j := index[h]
+ if j < 0 {
+ // New bvec.
+ index[h] = len(m.uniq)
+ m.uniq = append(m.uniq, bv)
+ return len(m.uniq) - 1
+ }
+ jlive := m.uniq[j]
+ if bv.Eq(jlive) {
+ // Existing bvec.
+ return j
+ }
+
+ h++
+ if h == uint32(len(index)) {
+ h = 0
+ }
+ }
+}
+
+// extractUniqe returns the slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUniqe() []bvec {
+ return m.uniq
+}
diff --git a/src/cmd/compile/internal/gc/class_string.go b/src/cmd/compile/internal/gc/class_string.go
new file mode 100644
index 0000000..a4084a7
--- /dev/null
+++ b/src/cmd/compile/internal/gc/class_string.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=Class"; DO NOT EDIT.
+
+package gc
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Pxxx-0]
+ _ = x[PEXTERN-1]
+ _ = x[PAUTO-2]
+ _ = x[PAUTOHEAP-3]
+ _ = x[PPARAM-4]
+ _ = x[PPARAMOUT-5]
+ _ = x[PFUNC-6]
+}
+
+const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPFUNC"
+
+var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 45}
+
+func (i Class) String() string {
+ if i >= Class(len(_Class_index)-1) {
+ return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go
new file mode 100644
index 0000000..bd350f6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/closure.go
@@ -0,0 +1,594 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+func (p *noder) funcLit(expr *syntax.FuncLit) *Node {
+ xtype := p.typeExpr(expr.Type)
+ ntype := p.typeExpr(expr.Type)
+
+ xfunc := p.nod(expr, ODCLFUNC, nil, nil)
+ xfunc.Func.SetIsHiddenClosure(Curfn != nil)
+ xfunc.Func.Nname = newfuncnamel(p.pos(expr), nblank.Sym) // filled in by typecheckclosure
+ xfunc.Func.Nname.Name.Param.Ntype = xtype
+ xfunc.Func.Nname.Name.Defn = xfunc
+
+ clo := p.nod(expr, OCLOSURE, nil, nil)
+ clo.Func.Ntype = ntype
+
+ xfunc.Func.Closure = clo
+ clo.Func.Closure = xfunc
+
+ p.funcBody(xfunc, expr.Body)
+
+ // closure-specific variables are hanging off the
+ // ordinary ones in the symbol table; see oldname.
+ // unhook them.
+ // make the list of pointers for the closure call.
+ for _, v := range xfunc.Func.Cvars.Slice() {
+ // Unlink from v1; see comment in syntax.go type Param for these fields.
+ v1 := v.Name.Defn
+ v1.Name.Param.Innermost = v.Name.Param.Outer
+
+ // If the closure usage of v is not dense,
+ // we need to make it dense; now that we're out
+ // of the function in which v appeared,
+ // look up v.Sym in the enclosing function
+ // and keep it around for use in the compiled code.
+ //
+ // That is, suppose we just finished parsing the innermost
+ // closure f4 in this code:
+ //
+ // func f() {
+ // v := 1
+ // func() { // f2
+ // use(v)
+ // func() { // f3
+ // func() { // f4
+ // use(v)
+ // }()
+ // }()
+ // }()
+ // }
+ //
+ // At this point v.Outer is f2's v; there is no f3's v.
+ // To construct the closure f4 from within f3,
+ // we need to use f3's v and in this case we need to create f3's v.
+ // We are now in the context of f3, so calling oldname(v.Sym)
+ // obtains f3's v, creating it if necessary (as it is in the example).
+ //
+ // capturevars will decide whether to use v directly or &v.
+ v.Name.Param.Outer = oldname(v.Sym)
+ }
+
+ return clo
+}
+
+// typecheckclosure typechecks an OCLOSURE node. It also creates the named
+// function associated with the closure.
+// TODO: This creation of the named function should probably really be done in a
+// separate pass from type-checking.
+func typecheckclosure(clo *Node, top int) {
+ xfunc := clo.Func.Closure
+	// Set the current associated iota value, so iota can be used inside
+	// a function in a ConstSpec; see issue #22344.
+ if x := getIotaValue(); x >= 0 {
+ xfunc.SetIota(x)
+ }
+
+ clo.Func.Ntype = typecheck(clo.Func.Ntype, ctxType)
+ clo.Type = clo.Func.Ntype.Type
+ clo.Func.Top = top
+
+	// Do not typecheck xfunc twice, otherwise we will end up pushing
+	// xfunc to xtop multiple times, causing initLSym to be called twice.
+	// See #30709.
+ if xfunc.Typecheck() == 1 {
+ return
+ }
+
+ for _, ln := range xfunc.Func.Cvars.Slice() {
+ n := ln.Name.Defn
+ if !n.Name.Captured() {
+ n.Name.SetCaptured(true)
+ if n.Name.Decldepth == 0 {
+ Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
+ }
+
+ // Ignore assignments to the variable in straightline code
+ // preceding the first capturing by a closure.
+ if n.Name.Decldepth == decldepth {
+ n.Name.SetAssigned(false)
+ }
+ }
+ }
+
+ xfunc.Func.Nname.Sym = closurename(Curfn)
+ setNodeNameFunc(xfunc.Func.Nname)
+ xfunc = typecheck(xfunc, ctxStmt)
+
+ // Type check the body now, but only if we're inside a function.
+ // At top level (in a variable initialization: curfn==nil) we're not
+ // ready to type check code yet; we'll check it later, because the
+ // underlying closure function we create is added to xtop.
+ if Curfn != nil && clo.Type != nil {
+ oldfn := Curfn
+ Curfn = xfunc
+ olddd := decldepth
+ decldepth = 1
+ typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
+ decldepth = olddd
+ Curfn = oldfn
+ }
+
+ xtop = append(xtop, xfunc)
+}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int
+
+// closurename generates a new unique name for a closure within
+// outerfunc.
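+//
+// For illustration (hypothetical names, derived from the format string
+// below): a closure in a package-level variable initializer is named
+// "glob..func1", the first closure inside a function f is named
+// "f.func1", and a closure nested inside that one is named "f.func1.1".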
+func closurename(outerfunc *Node) *types.Sym {
+ outer := "glob."
+ prefix := "func"
+ gen := &globClosgen
+
+ if outerfunc != nil {
+ if outerfunc.Func.Closure != nil {
+ prefix = ""
+ }
+
+ outer = outerfunc.funcname()
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if !outerfunc.Func.Nname.isBlank() {
+ gen = &outerfunc.Func.Closgen
+ }
+ }
+
+ *gen++
+ return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// capturevarscomplete is set to true when the capturevars phase is done.
+var capturevarscomplete bool
+
+// capturevars is called in a separate phase after all typechecking is done.
+// It decides whether each variable captured by a closure should be captured
+// by value or by reference.
+// We use value capturing for values <= 128 bytes that are never reassigned
+// after capturing (effectively constant).
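+//
+// For illustration (a hypothetical snippet, not code from this change):
+//
+//	x := 1
+//	y := 2
+//	f := func() int { y++; return x + y } // x: by value (small, never reassigned); y: by reference (reassigned)
+//	_ = f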
+func capturevars(xfunc *Node) {
+ lno := lineno
+ lineno = xfunc.Pos
+
+ clo := xfunc.Func.Closure
+ cvars := xfunc.Func.Cvars.Slice()
+ out := cvars[:0]
+ for _, v := range cvars {
+ if v.Type == nil {
+ // If v.Type is nil, it means v looked like it
+ // was going to be used in the closure, but
+ // isn't. This happens in struct literals like
+ // s{f: x} where we can't distinguish whether
+ // f is a field identifier or expression until
+ // resolving s.
+ continue
+ }
+ out = append(out, v)
+
+		// type check the & of closed-over variables outside the closure,
+ // so that the outer frame also grabs them and knows they escape.
+ dowidth(v.Type)
+
+ outer := v.Name.Param.Outer
+ outermost := v.Name.Defn
+
+ // out parameters will be assigned to implicitly upon return.
+ if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
+ v.Name.SetByval(true)
+ } else {
+ outermost.Name.SetAddrtaken(true)
+ outer = nod(OADDR, outer, nil)
+ }
+
+ if Debug.m > 1 {
+ var name *types.Sym
+ if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
+ name = v.Name.Curfn.Func.Nname.Sym
+ }
+ how := "ref"
+ if v.Name.Byval() {
+ how = "value"
+ }
+ Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
+ }
+
+ outer = typecheck(outer, ctxExpr)
+ clo.Func.Enter.Append(outer)
+ }
+
+ xfunc.Func.Cvars.Set(out)
+ lineno = lno
+}
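
As a quick illustration of the capture heuristic above (an editorial sketch, not part of this patch; all identifiers are invented), the captured variables in the following program would be classified like this:

	package main

	func classify() func() int {
		small := 1        // never reassigned after capture and only 8 bytes: captured by value
		var big [32]int64 // 256 bytes > 128: captured by reference
		counter := 0      // reassigned inside the closure: captured by reference
		return func() int {
			counter++
			return small + int(big[0]) + counter
		}
	}

	func main() { println(classify()()) }
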
+
+// transformclosure is called in a separate phase after escape analysis.
+// It transforms closure bodies to properly reference captured variables.
+func transformclosure(xfunc *Node) {
+ lno := lineno
+ lineno = xfunc.Pos
+ clo := xfunc.Func.Closure
+
+ if clo.Func.Top&ctxCallee != 0 {
+ // If the closure is directly called, we transform it to a plain function call
+ // with variables passed as args. This avoids allocation of a closure object.
+ // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
+ // will complete the transformation later.
+ // For illustration, the following closure:
+ // func(a int) {
+ // println(byval)
+ // byref++
+ // }(42)
+ // becomes:
+ // func(byval int, &byref *int, a int) {
+ // println(byval)
+ // (*&byref)++
+ // }(byval, &byref, 42)
+
+ // f is ONAME of the actual function.
+ f := xfunc.Func.Nname
+
+ // We are going to insert captured variables before input args.
+ var params []*types.Field
+ var decls []*Node
+ for _, v := range xfunc.Func.Cvars.Slice() {
+ if !v.Name.Byval() {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PAUTOHEAP with &v heapaddr
+ // (accesses will implicitly deref &v).
+ addr := newname(lookup("&" + v.Sym.Name))
+ addr.Type = types.NewPtr(v.Type)
+ v.Name.Param.Heapaddr = addr
+ v = addr
+ }
+
+ v.SetClass(PPARAM)
+ decls = append(decls, v)
+
+ fld := types.NewField()
+ fld.Nname = asTypesNode(v)
+ fld.Type = v.Type
+ fld.Sym = v.Sym
+ params = append(params, fld)
+ }
+
+ if len(params) > 0 {
+ // Prepend params and decls.
+ f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
+ xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
+ }
+
+ dowidth(f.Type)
+ xfunc.Type = f.Type // update type of ODCLFUNC
+ } else {
+		// The closure is not called, so it is going to stay as a closure.
+ var body []*Node
+ offset := int64(Widthptr)
+ for _, v := range xfunc.Func.Cvars.Slice() {
+ // cv refers to the field inside of closure OSTRUCTLIT.
+ cv := nod(OCLOSUREVAR, nil, nil)
+
+ cv.Type = v.Type
+ if !v.Name.Byval() {
+ cv.Type = types.NewPtr(v.Type)
+ }
+ offset = Rnd(offset, int64(cv.Type.Align))
+ cv.Xoffset = offset
+ offset += cv.Type.Width
+
+ if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
+ // If it is a small variable captured by value, downgrade it to PAUTO.
+ v.SetClass(PAUTO)
+ xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
+ body = append(body, nod(OAS, v, cv))
+ } else {
+				// Declare a variable holding the address taken from the closure
+				// and initialize it in the entry prologue.
+ addr := newname(lookup("&" + v.Sym.Name))
+ addr.Type = types.NewPtr(v.Type)
+ addr.SetClass(PAUTO)
+ addr.Name.SetUsed(true)
+ addr.Name.Curfn = xfunc
+ xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
+ v.Name.Param.Heapaddr = addr
+ if v.Name.Byval() {
+ cv = nod(OADDR, cv, nil)
+ }
+ body = append(body, nod(OAS, addr, cv))
+ }
+ }
+
+ if len(body) > 0 {
+ typecheckslice(body, ctxStmt)
+ xfunc.Func.Enter.Set(body)
+ xfunc.Func.SetNeedctxt(true)
+ }
+ }
+
+ lineno = lno
+}
+
+// hasemptycvars reports whether closure clo has an
+// empty list of captured vars.
+func hasemptycvars(clo *Node) bool {
+ xfunc := clo.Func.Closure
+ return xfunc.Func.Cvars.Len() == 0
+}
+
+// closuredebugruntimecheck applies boilerplate checks for debug flags
+// and for compiling the runtime.
+func closuredebugruntimecheck(clo *Node) {
+ if Debug_closure > 0 {
+ xfunc := clo.Func.Closure
+ if clo.Esc == EscHeap {
+ Warnl(clo.Pos, "heap closure, captured vars = %v", xfunc.Func.Cvars)
+ } else {
+ Warnl(clo.Pos, "stack closure, captured vars = %v", xfunc.Func.Cvars)
+ }
+ }
+ if compiling_runtime && clo.Esc == EscHeap {
+ yyerrorl(clo.Pos, "heap-allocated closure, not allowed in runtime")
+ }
+}
+
+// closureType returns the struct type used to hold all the information
+// needed in the closure for clo (clo must be an OCLOSURE node).
+// The address of a variable of the returned type can be cast to a func.
+func closureType(clo *Node) *types.Type {
+	// Create the closure in the form of a composite literal.
+	// Supposing the closure captures an int i and a string s,
+	// and has one float64 argument and no results,
+ // the generated code looks like:
+ //
+ // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
+ //
+ // The use of the struct provides type information to the garbage
+ // collector so that it can walk the closure. We could use (in this case)
+ // [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+ // The information appears in the binary in the form of type descriptors;
+ // the struct is unnamed so that closures in multiple packages with the
+ // same struct type can share the descriptor.
+ fields := []*Node{
+ namedfield(".F", types.Types[TUINTPTR]),
+ }
+ for _, v := range clo.Func.Closure.Func.Cvars.Slice() {
+ typ := v.Type
+ if !v.Name.Byval() {
+ typ = types.NewPtr(typ)
+ }
+ fields = append(fields, symfield(v.Sym, typ))
+ }
+ typ := tostruct(fields)
+ typ.SetNoalg(true)
+ return typ
+}
+
+func walkclosure(clo *Node, init *Nodes) *Node {
+ xfunc := clo.Func.Closure
+
+ // If no closure vars, don't bother wrapping.
+ if hasemptycvars(clo) {
+ if Debug_closure > 0 {
+ Warnl(clo.Pos, "closure converted to global")
+ }
+ return xfunc.Func.Nname
+ }
+ closuredebugruntimecheck(clo)
+
+ typ := closureType(clo)
+
+ clos := nod(OCOMPLIT, nil, typenod(typ))
+ clos.Esc = clo.Esc
+ clos.List.Set(append([]*Node{nod(OCFUNC, xfunc.Func.Nname, nil)}, clo.Func.Enter.Slice()...))
+
+ clos = nod(OADDR, clos, nil)
+ clos.Esc = clo.Esc
+
+ // Force type conversion from *struct to the func type.
+ clos = convnop(clos, clo.Type)
+
+ // non-escaping temp to use, if any.
+ if x := prealloc[clo]; x != nil {
+ if !types.Identical(typ, x.Type) {
+ panic("closure type does not match order's assigned type")
+ }
+ clos.Left.Right = x
+ delete(prealloc, clo)
+ }
+
+ return walkexpr(clos, init)
+}
+
+func typecheckpartialcall(fn *Node, sym *types.Sym) {
+ switch fn.Op {
+ case ODOTINTER, ODOTMETH:
+ break
+
+ default:
+ Fatalf("invalid typecheckpartialcall")
+ }
+
+ // Create top-level function.
+ xfunc := makepartialcall(fn, fn.Type, sym)
+ fn.Func = xfunc.Func
+ fn.Func.SetWrapper(true)
+ fn.Right = newname(sym)
+ fn.Op = OCALLPART
+ fn.Type = xfunc.Type
+}
+
+// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
+// for partial calls.
+func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
+ rcvrtype := fn.Left.Type
+ sym := methodSymSuffix(rcvrtype, meth, "-fm")
+
+ if sym.Uniq() {
+ return asNode(sym.Def)
+ }
+ sym.SetUniq(true)
+
+ savecurfn := Curfn
+ saveLineNo := lineno
+ Curfn = nil
+
+ // Set line number equal to the line number where the method is declared.
+ var m *types.Field
+ if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
+ lineno = m.Pos
+ }
+ // Note: !m.Pos.IsKnown() happens for method expressions where
+ // the method is implicitly declared. The Error method of the
+ // built-in error type is one such method. We leave the line
+ // number at the use of the method expression in this
+ // case. See issue 29389.
+
+ tfn := nod(OTFUNC, nil, nil)
+ tfn.List.Set(structargs(t0.Params(), true))
+ tfn.Rlist.Set(structargs(t0.Results(), false))
+
+ xfunc := dclfunc(sym, tfn)
+ xfunc.Func.SetDupok(true)
+ xfunc.Func.SetNeedctxt(true)
+
+ tfn.Type.SetPkg(t0.Pkg())
+
+ // Declare and initialize variable holding receiver.
+
+ cv := nod(OCLOSUREVAR, nil, nil)
+ cv.Type = rcvrtype
+ cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
+
+ ptr := newname(lookup(".this"))
+ declare(ptr, PAUTO)
+ ptr.Name.SetUsed(true)
+ var body []*Node
+ if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
+ ptr.Type = rcvrtype
+ body = append(body, nod(OAS, ptr, cv))
+ } else {
+ ptr.Type = types.NewPtr(rcvrtype)
+ body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
+ }
+
+ call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
+ call.List.Set(paramNnames(tfn.Type))
+ call.SetIsDDD(tfn.Type.IsVariadic())
+ if t0.NumResults() != 0 {
+ n := nod(ORETURN, nil, nil)
+ n.List.Set1(call)
+ call = n
+ }
+ body = append(body, call)
+
+ xfunc.Nbody.Set(body)
+ funcbody()
+
+ xfunc = typecheck(xfunc, ctxStmt)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ Curfn = xfunc
+ typecheckslice(xfunc.Nbody.Slice(), ctxStmt)
+ sym.Def = asTypesNode(xfunc)
+ xtop = append(xtop, xfunc)
+ Curfn = savecurfn
+ lineno = saveLineNo
+
+ return xfunc
+}
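
To relate the generated "-fm" wrapper to source-level behavior, here is a small illustrative program (not part of this change; T, M, and the values are invented). The wrapper captures the receiver once, at the point where the method value is created:

	package main

	type T struct{ n int }

	func (t T) M(a int) int { return t.n + a }

	func main() {
		x := T{n: 1}
		f := x.M      // method value: the receiver is captured (copied) here
		x.n = 5       // later changes to x do not affect f
		println(f(2)) // prints 3
	}
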
+
+// partialCallType returns the struct type used to hold all the information
+// needed in the closure for n (n must be an OCALLPART node).
+// The address of a variable of the returned type can be cast to a func.
+func partialCallType(n *Node) *types.Type {
+ t := tostruct([]*Node{
+ namedfield("F", types.Types[TUINTPTR]),
+ namedfield("R", n.Left.Type),
+ })
+ t.SetNoalg(true)
+ return t
+}
+
+func walkpartialcall(n *Node, init *Nodes) *Node {
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{T.M·f, x}
+ //
+ // Like walkclosure above.
+
+ if n.Left.Type.IsInterface() {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.Left = cheapexpr(n.Left, init)
+ n.Left = walkexpr(n.Left, nil)
+
+ tab := nod(OITAB, n.Left, nil)
+ tab = typecheck(tab, ctxExpr)
+
+ c := nod(OCHECKNIL, tab, nil)
+ c.SetTypecheck(1)
+ init.Append(c)
+ }
+
+ typ := partialCallType(n)
+
+ clos := nod(OCOMPLIT, nil, typenod(typ))
+ clos.Esc = n.Esc
+ clos.List.Set2(nod(OCFUNC, n.Func.Nname, nil), n.Left)
+
+ clos = nod(OADDR, clos, nil)
+ clos.Esc = n.Esc
+
+ // Force type conversion from *struct to the func type.
+ clos = convnop(clos, n.Type)
+
+ // non-escaping temp to use, if any.
+ if x := prealloc[n]; x != nil {
+ if !types.Identical(typ, x.Type) {
+ panic("partial call type does not match order's assigned type")
+ }
+ clos.Left.Right = x
+ delete(prealloc, n)
+ }
+
+ return walkexpr(clos, init)
+}
+
+// callpartMethod returns the *types.Field representing the method
+// referenced by method value n.
+func callpartMethod(n *Node) *types.Field {
+ if n.Op != OCALLPART {
+ Fatalf("expected OCALLPART, got %v", n)
+ }
+
+ // TODO(mdempsky): Optimize this. If necessary,
+ // makepartialcall could save m for us somewhere.
+ var m *types.Field
+ if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
+ Fatalf("failed to find field for OCALLPART")
+ }
+
+ return m
+}
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
new file mode 100644
index 0000000..b92c8d6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/const.go
@@ -0,0 +1,1323 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "math/big"
+ "strings"
+)
+
+// Ctype describes the constant kind of an "ideal" (untyped) constant.
+type Ctype uint8
+
+const (
+ CTxxx Ctype = iota
+
+ CTINT
+ CTRUNE
+ CTFLT
+ CTCPLX
+ CTSTR
+ CTBOOL
+ CTNIL
+)
+
+type Val struct {
+ // U contains one of:
+ // bool bool when Ctype() == CTBOOL
+ // *Mpint int when Ctype() == CTINT, rune when Ctype() == CTRUNE
+ // *Mpflt float when Ctype() == CTFLT
+ // *Mpcplx pair of floats when Ctype() == CTCPLX
+ // string string when Ctype() == CTSTR
+	//	*NilVal	nil when Ctype() == CTNIL
+ U interface{}
+}
+
+func (v Val) Ctype() Ctype {
+ switch x := v.U.(type) {
+ default:
+ Fatalf("unexpected Ctype for %T", v.U)
+ panic("unreachable")
+ case nil:
+ return CTxxx
+ case *NilVal:
+ return CTNIL
+ case bool:
+ return CTBOOL
+ case *Mpint:
+ if x.Rune {
+ return CTRUNE
+ }
+ return CTINT
+ case *Mpflt:
+ return CTFLT
+ case *Mpcplx:
+ return CTCPLX
+ case string:
+ return CTSTR
+ }
+}
+
+func eqval(a, b Val) bool {
+ if a.Ctype() != b.Ctype() {
+ return false
+ }
+ switch x := a.U.(type) {
+ default:
+ Fatalf("unexpected Ctype for %T", a.U)
+ panic("unreachable")
+ case *NilVal:
+ return true
+ case bool:
+ y := b.U.(bool)
+ return x == y
+ case *Mpint:
+ y := b.U.(*Mpint)
+ return x.Cmp(y) == 0
+ case *Mpflt:
+ y := b.U.(*Mpflt)
+ return x.Cmp(y) == 0
+ case *Mpcplx:
+ y := b.U.(*Mpcplx)
+ return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
+ case string:
+ y := b.U.(string)
+ return x == y
+ }
+}
+
+// Interface returns the constant value stored in v as an interface{}.
+// It returns int64s for ints and runes, float64s for floats,
+// complex128s for complex values, and nil for constant nils.
+func (v Val) Interface() interface{} {
+ switch x := v.U.(type) {
+ default:
+ Fatalf("unexpected Interface for %T", v.U)
+ panic("unreachable")
+ case *NilVal:
+ return nil
+ case bool, string:
+ return x
+ case *Mpint:
+ return x.Int64()
+ case *Mpflt:
+ return x.Float64()
+ case *Mpcplx:
+ return complex(x.Real.Float64(), x.Imag.Float64())
+ }
+}
+
+type NilVal struct{}
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant.
+func (n *Node) Int64Val() int64 {
+ if !Isconst(n, CTINT) {
+ Fatalf("Int64Val(%v)", n)
+ }
+ return n.Val().U.(*Mpint).Int64()
+}
+
+// CanInt64 reports whether it is safe to call Int64Val() on n.
+func (n *Node) CanInt64() bool {
+ if !Isconst(n, CTINT) {
+ return false
+ }
+
+ // if the value inside n cannot be represented as an int64, the
+ // return value of Int64 is undefined
+ return n.Val().U.(*Mpint).CmpInt64(n.Int64Val()) == 0
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func (n *Node) BoolVal() bool {
+ if !Isconst(n, CTBOOL) {
+ Fatalf("BoolVal(%v)", n)
+ }
+ return n.Val().U.(bool)
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func (n *Node) StringVal() string {
+ if !Isconst(n, CTSTR) {
+ Fatalf("StringVal(%v)", n)
+ }
+ return n.Val().U.(string)
+}
+
+// truncfltlit truncates the float literal oldv to 32-bit or 64-bit precision
+// according to type; it returns the truncated value.
+func truncfltlit(oldv *Mpflt, t *types.Type) *Mpflt {
+ if t == nil {
+ return oldv
+ }
+
+ if overflow(Val{oldv}, t) {
+ // If there was overflow, simply continuing would set the
+ // value to Inf which in turn would lead to spurious follow-on
+ // errors. Avoid this by returning the existing value.
+ return oldv
+ }
+
+ fv := newMpflt()
+
+ // convert large precision literal floating
+ // into limited precision (float64 or float32)
+ switch t.Etype {
+ case types.TFLOAT32:
+ fv.SetFloat64(oldv.Float32())
+ case types.TFLOAT64:
+ fv.SetFloat64(oldv.Float64())
+ default:
+ Fatalf("truncfltlit: unexpected Etype %v", t.Etype)
+ }
+
+ return fv
+}
+
+// trunccmplxlit truncates the Real and Imag parts of oldv to 32-bit or 64-bit
+// precision, according to type; it returns the truncated value. In case of
+// overflow, it calls yyerror but does not truncate the input value.
+func trunccmplxlit(oldv *Mpcplx, t *types.Type) *Mpcplx {
+ if t == nil {
+ return oldv
+ }
+
+ if overflow(Val{oldv}, t) {
+ // If there was overflow, simply continuing would set the
+ // value to Inf which in turn would lead to spurious follow-on
+ // errors. Avoid this by returning the existing value.
+ return oldv
+ }
+
+ cv := newMpcmplx()
+
+ switch t.Etype {
+ case types.TCOMPLEX64:
+ cv.Real.SetFloat64(oldv.Real.Float32())
+ cv.Imag.SetFloat64(oldv.Imag.Float32())
+ case types.TCOMPLEX128:
+ cv.Real.SetFloat64(oldv.Real.Float64())
+ cv.Imag.SetFloat64(oldv.Imag.Float64())
+ default:
+		Fatalf("trunccmplxlit: unexpected Etype %v", t.Etype)
+ }
+
+ return cv
+}
+
+// TODO(mdempsky): Replace these with better APIs.
+func convlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
+func defaultlit(n *Node, t *types.Type) *Node { return convlit1(n, t, false, nil) }
+
+// convlit1 converts an untyped expression n to type t. If n already
+// has a type, convlit1 has no effect.
+//
+// For explicit conversions, t must be non-nil, and integer-to-string
+// conversions are allowed.
+//
+// For implicit conversions (e.g., assignments), t may be nil; if so,
+// n is converted to its default type.
+//
+// If there's an error converting n to t, context is used in the error
+// message.
+func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Node {
+ if explicit && t == nil {
+ Fatalf("explicit conversion missing type")
+ }
+ if t != nil && t.IsUntyped() {
+ Fatalf("bad conversion to untyped: %v", t)
+ }
+
+ if n == nil || n.Type == nil {
+ // Allow sloppy callers.
+ return n
+ }
+ if !n.Type.IsUntyped() {
+ // Already typed; nothing to do.
+ return n
+ }
+
+ if n.Op == OLITERAL {
+ // Can't always set n.Type directly on OLITERAL nodes.
+ // See discussion on CL 20813.
+ n = n.rawcopy()
+ }
+
+ // Nil is technically not a constant, so handle it specially.
+ if n.Type.Etype == TNIL {
+ if t == nil {
+ yyerror("use of untyped nil")
+ n.SetDiag(true)
+ n.Type = nil
+ return n
+ }
+
+ if !t.HasNil() {
+ // Leave for caller to handle.
+ return n
+ }
+
+ n.Type = t
+ return n
+ }
+
+ if t == nil || !okforconst[t.Etype] {
+ t = defaultType(n.Type)
+ }
+
+ switch n.Op {
+ default:
+ Fatalf("unexpected untyped expression: %v", n)
+
+ case OLITERAL:
+ v := convertVal(n.Val(), t, explicit)
+ if v.U == nil {
+ break
+ }
+ n.SetVal(v)
+ n.Type = t
+ return n
+
+ case OPLUS, ONEG, OBITNOT, ONOT, OREAL, OIMAG:
+ ot := operandType(n.Op, t)
+ if ot == nil {
+ n = defaultlit(n, nil)
+ break
+ }
+
+ n.Left = convlit(n.Left, ot)
+ if n.Left.Type == nil {
+ n.Type = nil
+ return n
+ }
+ n.Type = t
+ return n
+
+ case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND, OCOMPLEX:
+ ot := operandType(n.Op, t)
+ if ot == nil {
+ n = defaultlit(n, nil)
+ break
+ }
+
+ n.Left = convlit(n.Left, ot)
+ n.Right = convlit(n.Right, ot)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if !types.Identical(n.Left.Type, n.Right.Type) {
+ yyerror("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
+ n.Type = nil
+ return n
+ }
+
+ n.Type = t
+ return n
+
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ if !t.IsBoolean() {
+ break
+ }
+ n.Type = t
+ return n
+
+ case OLSH, ORSH:
+ n.Left = convlit1(n.Left, t, explicit, nil)
+ n.Type = n.Left.Type
+ if n.Type != nil && !n.Type.IsInteger() {
+ yyerror("invalid operation: %v (shift of type %v)", n, n.Type)
+ n.Type = nil
+ }
+ return n
+ }
+
+ if !n.Diag() {
+ if !t.Broke() {
+ if explicit {
+ yyerror("cannot convert %L to type %v", n, t)
+ } else if context != nil {
+ yyerror("cannot use %L as type %v in %s", n, t, context())
+ } else {
+ yyerror("cannot use %L as type %v", n, t)
+ }
+ }
+ n.SetDiag(true)
+ }
+ n.Type = nil
+ return n
+}
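
A minimal illustration (not part of this CL; identifiers invented) of the implicit and explicit untyped-constant conversions that convlit1 handles for ordinary Go source:

	package main

	const big = 1 << 40 // untyped integer constant

	func main() {
		var f float64 = big   // implicit conversion of an untyped constant
		s := string(rune(65)) // explicit conversion; yields "A"
		// var b byte = big   // would be rejected: constant overflows byte
		println(f, s)
	}
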
+
+func operandType(op Op, t *types.Type) *types.Type {
+ switch op {
+ case OCOMPLEX:
+ if t.IsComplex() {
+ return floatForComplex(t)
+ }
+ case OREAL, OIMAG:
+ if t.IsFloat() {
+ return complexForFloat(t)
+ }
+ default:
+ if okfor[op][t.Etype] {
+ return t
+ }
+ }
+ return nil
+}
+
+// convertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns Val{} instead.
+//
+// If explicit is true, then conversions from integer to string are
+// also allowed.
+func convertVal(v Val, t *types.Type, explicit bool) Val {
+ switch ct := v.Ctype(); ct {
+ case CTBOOL:
+ if t.IsBoolean() {
+ return v
+ }
+
+ case CTSTR:
+ if t.IsString() {
+ return v
+ }
+
+ case CTINT, CTRUNE:
+ if explicit && t.IsString() {
+ return tostr(v)
+ }
+ fallthrough
+ case CTFLT, CTCPLX:
+ switch {
+ case t.IsInteger():
+ v = toint(v)
+ overflow(v, t)
+ return v
+ case t.IsFloat():
+ v = toflt(v)
+ v = Val{truncfltlit(v.U.(*Mpflt), t)}
+ return v
+ case t.IsComplex():
+ v = tocplx(v)
+ v = Val{trunccmplxlit(v.U.(*Mpcplx), t)}
+ return v
+ }
+ }
+
+ return Val{}
+}
+
+func tocplx(v Val) Val {
+ switch u := v.U.(type) {
+ case *Mpint:
+ c := newMpcmplx()
+ c.Real.SetInt(u)
+ c.Imag.SetFloat64(0.0)
+ v.U = c
+
+ case *Mpflt:
+ c := newMpcmplx()
+ c.Real.Set(u)
+ c.Imag.SetFloat64(0.0)
+ v.U = c
+ }
+
+ return v
+}
+
+func toflt(v Val) Val {
+ switch u := v.U.(type) {
+ case *Mpint:
+ f := newMpflt()
+ f.SetInt(u)
+ v.U = f
+
+ case *Mpcplx:
+ f := newMpflt()
+ f.Set(&u.Real)
+ if u.Imag.CmpFloat64(0) != 0 {
+ yyerror("constant %v truncated to real", u.GoString())
+ }
+ v.U = f
+ }
+
+ return v
+}
+
+func toint(v Val) Val {
+ switch u := v.U.(type) {
+ case *Mpint:
+ if u.Rune {
+ i := new(Mpint)
+ i.Set(u)
+ v.U = i
+ }
+
+ case *Mpflt:
+ i := new(Mpint)
+ if !i.SetFloat(u) {
+ if i.checkOverflow(0) {
+ yyerror("integer too large")
+ } else {
+				// The value of u cannot be represented as an integer,
+				// so we need to print an error message.
+ // Unfortunately some float values cannot be
+ // reasonably formatted for inclusion in an error
+ // message (example: 1 + 1e-100), so first we try to
+ // format the float; if the truncation resulted in
+ // something that looks like an integer we omit the
+ // value from the error message.
+ // (See issue #11371).
+ var t big.Float
+ t.Parse(u.GoString(), 10)
+ if t.IsInt() {
+ yyerror("constant truncated to integer")
+ } else {
+ yyerror("constant %v truncated to integer", u.GoString())
+ }
+ }
+ }
+ v.U = i
+
+ case *Mpcplx:
+ i := new(Mpint)
+ if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
+ yyerror("constant %v truncated to integer", u.GoString())
+ }
+
+ v.U = i
+ }
+
+ return v
+}
+
+func doesoverflow(v Val, t *types.Type) bool {
+ switch u := v.U.(type) {
+ case *Mpint:
+ if !t.IsInteger() {
+ Fatalf("overflow: %v integer constant", t)
+ }
+ return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
+
+ case *Mpflt:
+ if !t.IsFloat() {
+ Fatalf("overflow: %v floating-point constant", t)
+ }
+ return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
+
+ case *Mpcplx:
+ if !t.IsComplex() {
+ Fatalf("overflow: %v complex constant", t)
+ }
+ return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
+ u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
+ }
+
+ return false
+}
+
+func overflow(v Val, t *types.Type) bool {
+ // v has already been converted
+ // to appropriate form for t.
+ if t == nil || t.Etype == TIDEAL {
+ return false
+ }
+
+ // Only uintptrs may be converted to pointers, which cannot overflow.
+ if t.IsPtr() || t.IsUnsafePtr() {
+ return false
+ }
+
+ if doesoverflow(v, t) {
+ yyerror("constant %v overflows %v", v, t)
+ return true
+ }
+
+ return false
+}
+
+func tostr(v Val) Val {
+ switch u := v.U.(type) {
+ case *Mpint:
+ var r rune = 0xFFFD
+ if u.Cmp(minintval[TINT32]) >= 0 && u.Cmp(maxintval[TINT32]) <= 0 {
+ r = rune(u.Int64())
+ }
+ v.U = string(r)
+ }
+
+ return v
+}
+
+func consttype(n *Node) Ctype {
+ if n == nil || n.Op != OLITERAL {
+ return CTxxx
+ }
+ return n.Val().Ctype()
+}
+
+func Isconst(n *Node, ct Ctype) bool {
+ t := consttype(n)
+
+ // If the caller is asking for CTINT, allow CTRUNE too.
+ // Makes life easier for back ends.
+ return t == ct || (ct == CTINT && t == CTRUNE)
+}
+
+// evconst rewrites constant expressions into OLITERAL nodes.
+func evconst(n *Node) {
+ nl, nr := n.Left, n.Right
+
+ // Pick off just the opcodes that can be constant evaluated.
+ switch op := n.Op; op {
+ case OPLUS, ONEG, OBITNOT, ONOT:
+ if nl.Op == OLITERAL {
+ setconst(n, unaryOp(op, nl.Val(), n.Type))
+ }
+
+ case OADD, OSUB, OMUL, ODIV, OMOD, OOR, OXOR, OAND, OANDNOT, OOROR, OANDAND:
+ if nl.Op == OLITERAL && nr.Op == OLITERAL {
+ setconst(n, binaryOp(nl.Val(), op, nr.Val()))
+ }
+
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ if nl.Op == OLITERAL && nr.Op == OLITERAL {
+ setboolconst(n, compareOp(nl.Val(), op, nr.Val()))
+ }
+
+ case OLSH, ORSH:
+ if nl.Op == OLITERAL && nr.Op == OLITERAL {
+ setconst(n, shiftOp(nl.Val(), op, nr.Val()))
+ }
+
+ case OCONV, ORUNESTR:
+ if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
+ setconst(n, convertVal(nl.Val(), n.Type, true))
+ }
+
+ case OCONVNOP:
+ if okforconst[n.Type.Etype] && nl.Op == OLITERAL {
+ // set so n.Orig gets OCONV instead of OCONVNOP
+ n.Op = OCONV
+ setconst(n, nl.Val())
+ }
+
+ case OADDSTR:
+ // Merge adjacent constants in the argument list.
+ s := n.List.Slice()
+ for i1 := 0; i1 < len(s); i1++ {
+ if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
+ // merge from i1 up to but not including i2
+ var strs []string
+ i2 := i1
+ for i2 < len(s) && Isconst(s[i2], CTSTR) {
+ strs = append(strs, s[i2].StringVal())
+ i2++
+ }
+
+ nl := *s[i1]
+ nl.Orig = &nl
+ nl.SetVal(Val{strings.Join(strs, "")})
+ s[i1] = &nl
+ s = append(s[:i1+1], s[i2:]...)
+ }
+ }
+
+ if len(s) == 1 && Isconst(s[0], CTSTR) {
+ n.Op = OLITERAL
+ n.SetVal(s[0].Val())
+ } else {
+ n.List.Set(s)
+ }
+
+ case OCAP, OLEN:
+ switch nl.Type.Etype {
+ case TSTRING:
+ if Isconst(nl, CTSTR) {
+ setintconst(n, int64(len(nl.StringVal())))
+ }
+ case TARRAY:
+ if !hascallchan(nl) {
+ setintconst(n, nl.Type.NumElem())
+ }
+ }
+
+ case OALIGNOF, OOFFSETOF, OSIZEOF:
+ setintconst(n, evalunsafe(n))
+
+ case OREAL, OIMAG:
+ if nl.Op == OLITERAL {
+ var re, im *Mpflt
+ switch u := nl.Val().U.(type) {
+ case *Mpint:
+ re = newMpflt()
+ re.SetInt(u)
+ // im = 0
+ case *Mpflt:
+ re = u
+ // im = 0
+ case *Mpcplx:
+ re = &u.Real
+ im = &u.Imag
+ default:
+ Fatalf("impossible")
+ }
+ if n.Op == OIMAG {
+ if im == nil {
+ im = newMpflt()
+ }
+ re = im
+ }
+ setconst(n, Val{re})
+ }
+
+ case OCOMPLEX:
+ if nl.Op == OLITERAL && nr.Op == OLITERAL {
+ // make it a complex literal
+ c := newMpcmplx()
+ c.Real.Set(toflt(nl.Val()).U.(*Mpflt))
+ c.Imag.Set(toflt(nr.Val()).U.(*Mpflt))
+ setconst(n, Val{c})
+ }
+ }
+}
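
For context, an illustrative sample (not part of this change) of the kinds of expressions that end up folded to constants at compile time, as exercised at length by constFold_test.go below:

	package main

	const (
		n = len("hi") * 3   // folded to 6
		m = 1<<8 | 1        // folded to 257
		s = "a" + "b" + "c" // adjacent string constants merged into "abc"
	)

	var a [1 << 3]int // array length folded to 8

	func main() { println(n, m, s, len(a)) }
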
+
+func match(x, y Val) (Val, Val) {
+ switch {
+ case x.Ctype() == CTCPLX || y.Ctype() == CTCPLX:
+ return tocplx(x), tocplx(y)
+ case x.Ctype() == CTFLT || y.Ctype() == CTFLT:
+ return toflt(x), toflt(y)
+ }
+
+ // Mixed int/rune are fine.
+ return x, y
+}
+
+func compareOp(x Val, op Op, y Val) bool {
+ x, y = match(x, y)
+
+ switch x.Ctype() {
+ case CTBOOL:
+ x, y := x.U.(bool), y.U.(bool)
+ switch op {
+ case OEQ:
+ return x == y
+ case ONE:
+ return x != y
+ }
+
+ case CTINT, CTRUNE:
+ x, y := x.U.(*Mpint), y.U.(*Mpint)
+ return cmpZero(x.Cmp(y), op)
+
+ case CTFLT:
+ x, y := x.U.(*Mpflt), y.U.(*Mpflt)
+ return cmpZero(x.Cmp(y), op)
+
+ case CTCPLX:
+ x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
+ eq := x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
+ switch op {
+ case OEQ:
+ return eq
+ case ONE:
+ return !eq
+ }
+
+ case CTSTR:
+ x, y := x.U.(string), y.U.(string)
+ switch op {
+ case OEQ:
+ return x == y
+ case ONE:
+ return x != y
+ case OLT:
+ return x < y
+ case OLE:
+ return x <= y
+ case OGT:
+ return x > y
+ case OGE:
+ return x >= y
+ }
+ }
+
+ Fatalf("compareOp: bad comparison: %v %v %v", x, op, y)
+ panic("unreachable")
+}
+
+func cmpZero(x int, op Op) bool {
+ switch op {
+ case OEQ:
+ return x == 0
+ case ONE:
+ return x != 0
+ case OLT:
+ return x < 0
+ case OLE:
+ return x <= 0
+ case OGT:
+ return x > 0
+ case OGE:
+ return x >= 0
+ }
+
+ Fatalf("cmpZero: want comparison operator, got %v", op)
+ panic("unreachable")
+}
+
+func binaryOp(x Val, op Op, y Val) Val {
+ x, y = match(x, y)
+
+Outer:
+ switch x.Ctype() {
+ case CTBOOL:
+ x, y := x.U.(bool), y.U.(bool)
+ switch op {
+ case OANDAND:
+ return Val{U: x && y}
+ case OOROR:
+ return Val{U: x || y}
+ }
+
+ case CTINT, CTRUNE:
+ x, y := x.U.(*Mpint), y.U.(*Mpint)
+
+ u := new(Mpint)
+ u.Rune = x.Rune || y.Rune
+ u.Set(x)
+ switch op {
+ case OADD:
+ u.Add(y)
+ case OSUB:
+ u.Sub(y)
+ case OMUL:
+ u.Mul(y)
+ case ODIV:
+ if y.CmpInt64(0) == 0 {
+ yyerror("division by zero")
+ return Val{}
+ }
+ u.Quo(y)
+ case OMOD:
+ if y.CmpInt64(0) == 0 {
+ yyerror("division by zero")
+ return Val{}
+ }
+ u.Rem(y)
+ case OOR:
+ u.Or(y)
+ case OAND:
+ u.And(y)
+ case OANDNOT:
+ u.AndNot(y)
+ case OXOR:
+ u.Xor(y)
+ default:
+ break Outer
+ }
+ return Val{U: u}
+
+ case CTFLT:
+ x, y := x.U.(*Mpflt), y.U.(*Mpflt)
+
+ u := newMpflt()
+ u.Set(x)
+ switch op {
+ case OADD:
+ u.Add(y)
+ case OSUB:
+ u.Sub(y)
+ case OMUL:
+ u.Mul(y)
+ case ODIV:
+ if y.CmpFloat64(0) == 0 {
+ yyerror("division by zero")
+ return Val{}
+ }
+ u.Quo(y)
+ default:
+ break Outer
+ }
+ return Val{U: u}
+
+ case CTCPLX:
+ x, y := x.U.(*Mpcplx), y.U.(*Mpcplx)
+
+ u := newMpcmplx()
+ u.Real.Set(&x.Real)
+ u.Imag.Set(&x.Imag)
+ switch op {
+ case OADD:
+ u.Real.Add(&y.Real)
+ u.Imag.Add(&y.Imag)
+ case OSUB:
+ u.Real.Sub(&y.Real)
+ u.Imag.Sub(&y.Imag)
+ case OMUL:
+ u.Mul(y)
+ case ODIV:
+ if !u.Div(y) {
+ yyerror("complex division by zero")
+ return Val{}
+ }
+ default:
+ break Outer
+ }
+ return Val{U: u}
+ }
+
+ Fatalf("binaryOp: bad operation: %v %v %v", x, op, y)
+ panic("unreachable")
+}
+
+func unaryOp(op Op, x Val, t *types.Type) Val {
+ switch op {
+ case OPLUS:
+ switch x.Ctype() {
+ case CTINT, CTRUNE, CTFLT, CTCPLX:
+ return x
+ }
+
+ case ONEG:
+ switch x.Ctype() {
+ case CTINT, CTRUNE:
+ x := x.U.(*Mpint)
+ u := new(Mpint)
+ u.Rune = x.Rune
+ u.Set(x)
+ u.Neg()
+ return Val{U: u}
+
+ case CTFLT:
+ x := x.U.(*Mpflt)
+ u := newMpflt()
+ u.Set(x)
+ u.Neg()
+ return Val{U: u}
+
+ case CTCPLX:
+ x := x.U.(*Mpcplx)
+ u := newMpcmplx()
+ u.Real.Set(&x.Real)
+ u.Imag.Set(&x.Imag)
+ u.Real.Neg()
+ u.Imag.Neg()
+ return Val{U: u}
+ }
+
+ case OBITNOT:
+ switch x.Ctype() {
+ case CTINT, CTRUNE:
+ x := x.U.(*Mpint)
+
+ u := new(Mpint)
+ u.Rune = x.Rune
+ if t.IsSigned() || t.IsUntyped() {
+ // Signed values change sign.
+ u.SetInt64(-1)
+ } else {
+ // Unsigned values invert their bits.
+ u.Set(maxintval[t.Etype])
+ }
+ u.Xor(x)
+ return Val{U: u}
+ }
+
+ case ONOT:
+ return Val{U: !x.U.(bool)}
+ }
+
+ Fatalf("unaryOp: bad operation: %v %v", op, x)
+ panic("unreachable")
+}
+
+func shiftOp(x Val, op Op, y Val) Val {
+ if x.Ctype() != CTRUNE {
+ x = toint(x)
+ }
+ y = toint(y)
+
+ u := new(Mpint)
+ u.Set(x.U.(*Mpint))
+ u.Rune = x.U.(*Mpint).Rune
+ switch op {
+ case OLSH:
+ u.Lsh(y.U.(*Mpint))
+ case ORSH:
+ u.Rsh(y.U.(*Mpint))
+ default:
+ Fatalf("shiftOp: bad operator: %v", op)
+ panic("unreachable")
+ }
+ return Val{U: u}
+}
+
+// setconst rewrites n as an OLITERAL with value v.
+func setconst(n *Node, v Val) {
+ // If constant folding failed, mark n as broken and give up.
+ if v.U == nil {
+ n.Type = nil
+ return
+ }
+
+ // Ensure n.Orig still points to a semantically-equivalent
+ // expression after we rewrite n into a constant.
+ if n.Orig == n {
+ n.Orig = n.sepcopy()
+ }
+
+ *n = Node{
+ Op: OLITERAL,
+ Pos: n.Pos,
+ Orig: n.Orig,
+ Type: n.Type,
+ Xoffset: BADWIDTH,
+ }
+ n.SetVal(v)
+ if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
+ Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
+ }
+
+ // Check range.
+ lno := setlineno(n)
+ overflow(v, n.Type)
+ lineno = lno
+
+ if !n.Type.IsUntyped() {
+ switch v.Ctype() {
+ // Truncate precision for non-ideal float.
+ case CTFLT:
+ n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
+ // Truncate precision for non-ideal complex.
+ case CTCPLX:
+ n.SetVal(Val{trunccmplxlit(v.U.(*Mpcplx), n.Type)})
+ }
+ }
+}
+
+func setboolconst(n *Node, v bool) {
+ setconst(n, Val{U: v})
+}
+
+func setintconst(n *Node, v int64) {
+ u := new(Mpint)
+ u.SetInt64(v)
+ setconst(n, Val{u})
+}
+
+// nodlit returns a new untyped constant with value v.
+func nodlit(v Val) *Node {
+ n := nod(OLITERAL, nil, nil)
+ n.SetVal(v)
+ n.Type = idealType(v.Ctype())
+ return n
+}
+
+func idealType(ct Ctype) *types.Type {
+ switch ct {
+ case CTSTR:
+ return types.UntypedString
+ case CTBOOL:
+ return types.UntypedBool
+ case CTINT:
+ return types.UntypedInt
+ case CTRUNE:
+ return types.UntypedRune
+ case CTFLT:
+ return types.UntypedFloat
+ case CTCPLX:
+ return types.UntypedComplex
+ case CTNIL:
+ return types.Types[TNIL]
+ }
+ Fatalf("unexpected Ctype: %v", ct)
+ return nil
+}
+
+// defaultlit2 applies defaultlit to both nodes simultaneously;
+// if they're both ideal going in, they had better
+// get the same type going out.
+// force means a concrete (non-ideal) type must be assigned.
+// The results of defaultlit2 MUST be assigned back to l and r, e.g.
+//	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
+ if l.Type == nil || r.Type == nil {
+ return l, r
+ }
+ if !l.Type.IsUntyped() {
+ r = convlit(r, l.Type)
+ return l, r
+ }
+
+ if !r.Type.IsUntyped() {
+ l = convlit(l, r.Type)
+ return l, r
+ }
+
+ if !force {
+ return l, r
+ }
+
+ // Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
+ if l.Type.IsBoolean() != r.Type.IsBoolean() {
+ return l, r
+ }
+ if l.Type.IsString() != r.Type.IsString() {
+ return l, r
+ }
+ if l.isNil() || r.isNil() {
+ return l, r
+ }
+
+ t := defaultType(mixUntyped(l.Type, r.Type))
+ l = convlit(l, t)
+ r = convlit(r, t)
+ return l, r
+}
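
A short illustration (not part of this change) of the mixed-kind untyped defaulting that mixUntyped and defaultType encode: the larger of the two untyped kinds wins, then the usual default type is applied.

	package main

	func main() {
		f := 'a' + 1.5 // untyped rune + untyped float -> untyped float; f becomes float64 (98.5)
		c := 1 + 2i    // untyped int + untyped complex -> c becomes complex128
		_, _ = f, c
	}
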
+
+func ctype(t *types.Type) Ctype {
+ switch t {
+ case types.UntypedBool:
+ return CTBOOL
+ case types.UntypedString:
+ return CTSTR
+ case types.UntypedInt:
+ return CTINT
+ case types.UntypedRune:
+ return CTRUNE
+ case types.UntypedFloat:
+ return CTFLT
+ case types.UntypedComplex:
+ return CTCPLX
+ }
+ Fatalf("bad type %v", t)
+ panic("unreachable")
+}
+
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+ t := t1
+ if ctype(t2) > ctype(t1) {
+ t = t2
+ }
+ return t
+}
+
+func defaultType(t *types.Type) *types.Type {
+ if !t.IsUntyped() || t.Etype == TNIL {
+ return t
+ }
+
+ switch t {
+ case types.UntypedBool:
+ return types.Types[TBOOL]
+ case types.UntypedString:
+ return types.Types[TSTRING]
+ case types.UntypedInt:
+ return types.Types[TINT]
+ case types.UntypedRune:
+ return types.Runetype
+ case types.UntypedFloat:
+ return types.Types[TFLOAT64]
+ case types.UntypedComplex:
+ return types.Types[TCOMPLEX128]
+ }
+
+ Fatalf("bad type %v", t)
+ return nil
+}
+
+func smallintconst(n *Node) bool {
+ if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
+ switch simtype[n.Type.Etype] {
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TBOOL:
+ return true
+
+ case TIDEAL, TINT64, TUINT64, TPTR:
+ v, ok := n.Val().U.(*Mpint)
+ if ok && v.Cmp(minintval[TINT32]) >= 0 && v.Cmp(maxintval[TINT32]) <= 0 {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// indexconst checks if Node n contains a constant expression
+// representable as a non-negative int and returns its value.
+// If n is not a constant expression, not representable as an
+// integer, or negative, it returns -1. If n is too large, it
+// returns -2.
+func indexconst(n *Node) int64 {
+ if n.Op != OLITERAL {
+ return -1
+ }
+
+ v := toint(n.Val()) // toint returns argument unchanged if not representable as an *Mpint
+ vi, ok := v.U.(*Mpint)
+ if !ok || vi.CmpInt64(0) < 0 {
+ return -1
+ }
+ if vi.Cmp(maxintval[TINT]) > 0 {
+ return -2
+ }
+
+ return vi.Int64()
+}
+
+// isGoConst reports whether n is a Go language constant (as opposed to a
+// compile-time constant).
+//
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+func (n *Node) isGoConst() bool {
+ return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
+}
+
+func hascallchan(n *Node) bool {
+ if n == nil {
+ return false
+ }
+ switch n.Op {
+ case OAPPEND,
+ OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OCAP,
+ OCLOSE,
+ OCOMPLEX,
+ OCOPY,
+ ODELETE,
+ OIMAG,
+ OLEN,
+ OMAKE,
+ ONEW,
+ OPANIC,
+ OPRINT,
+ OPRINTN,
+ OREAL,
+ ORECOVER,
+ ORECV:
+ return true
+ }
+
+ if hascallchan(n.Left) || hascallchan(n.Right) {
+ return true
+ }
+ for _, n1 := range n.List.Slice() {
+ if hascallchan(n1) {
+ return true
+ }
+ }
+ for _, n2 := range n.Rlist.Slice() {
+ if hascallchan(n2) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// A constSet represents a set of Go constant expressions.
+type constSet struct {
+ m map[constSetKey]src.XPos
+}
+
+type constSetKey struct {
+ typ *types.Type
+ val interface{}
+}
+
+// add adds constant expression n to s. If a constant expression of
+// equal value and identical type has already been added, then add
+// reports an error about the duplicate value.
+//
+// pos provides position information for where expression n occurred
+// (in case n does not have its own position information). what and
+// where are used in the error message.
+//
+// n must not be an untyped constant.
+func (s *constSet) add(pos src.XPos, n *Node, what, where string) {
+ if n.Op == OCONVIFACE && n.Implicit() {
+ n = n.Left
+ }
+
+ if !n.isGoConst() {
+ return
+ }
+ if n.Type.IsUntyped() {
+ Fatalf("%v is untyped", n)
+ }
+
+ // Consts are only duplicates if they have the same value and
+ // identical types.
+ //
+ // In general, we have to use types.Identical to test type
+ // identity, because == gives false negatives for anonymous
+ // types and the byte/uint8 and rune/int32 builtin type
+ // aliases. However, this is not a problem here, because
+ // constant expressions are always untyped or have a named
+ // type, and we explicitly handle the builtin type aliases
+ // below.
+ //
+ // This approach may need to be revisited though if we fix
+ // #21866 by treating all type aliases like byte/uint8 and
+ // rune/int32.
+
+ typ := n.Type
+ switch typ {
+ case types.Bytetype:
+ typ = types.Types[TUINT8]
+ case types.Runetype:
+ typ = types.Types[TINT32]
+ }
+ k := constSetKey{typ, n.Val().Interface()}
+
+ if hasUniquePos(n) {
+ pos = n.Pos
+ }
+
+ if s.m == nil {
+ s.m = make(map[constSetKey]src.XPos)
+ }
+
+ if prevPos, isDup := s.m[k]; isDup {
+ yyerrorl(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
+ what, nodeAndVal(n), where,
+ what, linestr(prevPos))
+ } else {
+ s.m[k] = pos
+ }
+}
+
+// nodeAndVal reports both an expression and its constant value, if
+// the latter is non-obvious.
+//
+// TODO(mdempsky): This could probably be a fmt.go flag.
+func nodeAndVal(n *Node) string {
+ show := n.String()
+ val := n.Val().Interface()
+ if s := fmt.Sprintf("%#v", val); show != s {
+ show += " (value " + s + ")"
+ }
+ return show
+}
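
Finally, an illustrative sketch (not part of this change; names invented) of where the duplicate detection backed by constSet surfaces to users: duplicate switch cases and duplicate map-literal keys. Each commented-out line would be reported as a duplicate if restored:

	package main

	func bucket(x int) string {
		switch x {
		case 1, 2:
			return "small"
		// case 2: // duplicate case 2 in expression switch
		default:
			return "other"
		}
	}

	var m = map[string]int{
		"a": 1,
		// "a": 2, // duplicate key "a" in map literal
	}

	func main() { println(bucket(2), m["a"]) }
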
diff --git a/src/cmd/compile/internal/gc/constFold_test.go b/src/cmd/compile/internal/gc/constFold_test.go
new file mode 100644
index 0000000..59f905d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/constFold_test.go
@@ -0,0 +1,18111 @@
+// run
+// Code generated by gen/constFoldGen.go. DO NOT EDIT.
+
+package gc
+
+import "testing"
+
+func TestConstFolduint64add(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 18446744073709551615", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "+", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967295", "+", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 18446744073709551614", "+", r)
+ }
+}
+func TestConstFolduint64sub(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 1 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584320 {
+ t.Errorf("0 %s 4294967296 = %d, want 18446744069414584320", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584321 {
+ t.Errorf("1 %s 4294967296 = %d, want 18446744069414584321", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 2", "-", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967297", "-", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584319 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584319", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint64div(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x / y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint64mul(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 18446744073709551615", "*", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 18446744069414584320", "*", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584320", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint64mod(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967296", "%", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint64add(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -9223372032559808512", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "+", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 2", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775806", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -9223372032559808511", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "+", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want -8589934592", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967297", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967295", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808510", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808511", "+", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-1 %s -4294967296 = %d, want -4294967297", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("-1 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 9223372036854775806", "+", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want -4294967296", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 9223372036854775807", "+", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775806", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967295", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808511", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967295", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808513", "+", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 9223372032559808510", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want -4", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -3", "+", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 9223372032559808511", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -9223372032559808513", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -3", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want -2", "+", r)
+ }
+}
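+// TestConstFoldint64sub covers int64 subtraction with the same boundary constant
+// operands; expected results assume two's-complement wraparound on overflow.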
+func TestConstFoldint64sub(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 9223372032559808512", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 2", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want 1", "-", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -9223372032559808511", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 9223372032559808513", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 3 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 3", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 2", "-", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808511", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967295", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967297", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -8589934592", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 9223372032559808514 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808514", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808513", "-", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775806", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967295", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967297", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want 4294967296", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want -4294967296", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want -9223372036854775807", "-", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("1 %s -4294967296 = %d, want 4294967297", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("1 %s 4294967296 = %d, want -4294967295", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775805 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want -9223372036854775805", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775806", "-", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 8589934592", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967297", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372032559808510 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808510", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808511", "-", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -3", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -9223372032559808514", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775805", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 9223372032559808510", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -1", "-", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -2", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 9223372032559808511", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "-", r)
+ }
+}
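+// TestConstFoldint64div covers int64 division with boundary constant operands;
+// zero divisors are skipped, and MinInt64 / -1 is expected to wrap back to MinInt64.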
+func TestConstFoldint64div(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 1", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483648 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 2147483648", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -2147483648", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "/", r)
+ }
+}
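+// TestConstFoldint64mul covers int64 multiplication; products that overflow are
+// expected to reduce modulo 2^64 (two's-complement wraparound).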
+func TestConstFoldint64mul(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "*", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 8589934592", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 4", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -9223372036854775806", "*", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "*", r)
+ }
+}
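+// TestConstFoldint64mod covers the int64 remainder operator; zero divisors are
+// skipped, and results take the sign of the dividend, per Go's % semantics.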
+func TestConstFoldint64mod(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want -4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want -4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -4294967296 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -4294967296 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+}
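+// TestConstFolduint32add covers uint32 addition over 0, 1 and MaxUint32;
+// sums are expected to reduce modulo 2^32.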
+func TestConstFolduint32add(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("0 %s 4294967295 = %d, want 4294967295", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "+", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 4294967294", "+", r)
+ }
+}
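+// TestConstFolduint32sub covers uint32 subtraction; differences below zero
+// are expected to wrap modulo 2^32.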
+func TestConstFolduint32sub(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("0 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 4294967295 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 4294967295 = %d, want 2", "-", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "-", r)
+ }
+}
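+// TestConstFolduint32div covers uint32 division; zero divisors are skipped.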
+func TestConstFolduint32div(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "/", r)
+ }
+}
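+// TestConstFolduint32mul covers uint32 multiplication modulo 2^32.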
+func TestConstFolduint32mul(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("1 %s 4294967295 = %d, want 4294967295", "*", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967295 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "*", r)
+ }
+}
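+// TestConstFolduint32mod covers the uint32 remainder operator; zero divisors are skipped.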
+func TestConstFolduint32mod(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967295 = %d, want 1", "%", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "%", r)
+ }
+}
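+// TestConstFoldint32add covers int32 addition over MinInt32, MaxInt32, ±1 and 0,
+// with two's-complement wraparound expected on overflow.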
+func TestConstFoldint32add(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want 2147483647", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483647", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "+", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 2 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483648", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483646", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483647 = %d, want -2147483648", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("-1 %s 2147483647 = %d, want 2147483646", "+", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want -2147483647", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want 2147483647", "+", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483646", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483648", "+", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x + y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s -1 = %d, want 2147483646", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s 1 = %d, want -2147483648", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want -2", "+", r)
+ }
+}
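+// TestConstFoldint32sub covers int32 subtraction with the same boundary operands
+// and wraparound expected on overflow.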
+func TestConstFoldint32sub(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483647", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want 2147483647", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want 1", "-", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483646", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483648", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483646", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483648", "-", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want 2147483647", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want -2147483647", "-", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483648", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483646", "-", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x - y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483648", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483646", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "-", r)
+ }
+}
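+// TestConstFoldint32div covers int32 division; zero divisors are skipped, and
+// MinInt32 / -1 is expected to wrap back to MinInt32.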
+func TestConstFoldint32div(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 1", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "/", r)
+ }
+}
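+// TestConstFoldint32mul covers int32 multiplication modulo 2^32.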
+func TestConstFoldint32mul(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -2147483648", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -2147483648", "*", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483647", "*", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("1 %s 2147483647 = %d, want 2147483647", "*", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "*", r)
+ }
+}
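+// TestConstFoldint32mod checks constant folding of int32 remainder; per the Go spec the result takes the sign of the dividend.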
+func TestConstFoldint32mod(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x % y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483648 = %d, want -1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483648 = %d, want 1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483647 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 2147483647 = %d, want 1", "%", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x % y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+}
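+// The uint16 tests below exercise the same operators on the unsigned 16-bit boundary values 0, 1, and 65535; arithmetic wraps modulo 2^16.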
+func TestConstFolduint16add(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65535 {
+ t.Errorf("0 %s 65535 = %d, want 65535", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "+", r)
+ }
+ x = 65535
+ y = 0
+ r = x + y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65534 {
+ t.Errorf("65535 %s 65535 = %d, want 65534", "+", r)
+ }
+}
+func TestConstFolduint16sub(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65535 {
+ t.Errorf("0 %s 1 = %d, want 65535", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 65535 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 65535 = %d, want 2", "-", r)
+ }
+ x = 65535
+ y = 0
+ r = x - y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint16div(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 65535
+ y = 1
+ r = x / y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint16mul(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 65535 {
+ t.Errorf("1 %s 65535 = %d, want 65535", "*", r)
+ }
+ x = 65535
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("65535 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint16mod(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 65535 = %d, want 1", "%", r)
+ }
+ x = 65535
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "%", r)
+ }
+}
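+// The int16 tests mirror the int32 cases with the 16-bit boundary values -32768, -32767, -1, 0, 1, 32766, and 32767.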
+func TestConstFoldint16add(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-32768 %s -1 = %d, want 32767", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32768 %s 1 = %d, want -32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "+", r)
+ }
+ x = -32767
+ y = -32768
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 2 {
+ t.Errorf("-32767 %s -32767 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32767 %s -1 = %d, want -32768", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32766 {
+ t.Errorf("-32767 %s 1 = %d, want -32766", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -32768
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-1 %s -32767 = %d, want -32768", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32765 {
+ t.Errorf("-1 %s 32766 = %d, want 32765", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32766 {
+ t.Errorf("-1 %s 32767 = %d, want 32766", "+", r)
+ }
+ x = 0
+ y = -32768
+ r = x + y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32767 {
+ t.Errorf("0 %s -32767 = %d, want -32767", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32766 {
+ t.Errorf("0 %s 32766 = %d, want 32766", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32767 {
+ t.Errorf("0 %s 32767 = %d, want 32767", "+", r)
+ }
+ x = 1
+ y = -32768
+ r = x + y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32766 {
+ t.Errorf("1 %s -32767 = %d, want -32766", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32767 {
+ t.Errorf("1 %s 32766 = %d, want 32767", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("1 %s 32767 = %d, want -32768", "+", r)
+ }
+ x = 32766
+ y = -32768
+ r = x + y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("32766 %s -32767 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32765 {
+ t.Errorf("32766 %s -1 = %d, want 32765", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32766 %s 1 = %d, want 32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -4 {
+ t.Errorf("32766 %s 32766 = %d, want -4", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -3 {
+ t.Errorf("32766 %s 32767 = %d, want -3", "+", r)
+ }
+ x = 32767
+ y = -32768
+ r = x + y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32767 %s -1 = %d, want 32766", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("32767 %s 1 = %d, want -32768", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -3 {
+ t.Errorf("32767 %s 32766 = %d, want -3", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -2 {
+ t.Errorf("32767 %s 32767 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint16sub(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32768 %s -1 = %d, want -32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-32768 %s 1 = %d, want 32767", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32768 %s 32766 = %d, want 2", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32768 %s 32767 = %d, want 1", "-", r)
+ }
+ x = -32767
+ y = -32768
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32766 {
+ t.Errorf("-32767 %s -1 = %d, want -32766", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32767 %s 1 = %d, want -32768", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 3 {
+ t.Errorf("-32767 %s 32766 = %d, want 3", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32767 %s 32767 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -32768
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32766 {
+ t.Errorf("-1 %s -32767 = %d, want 32766", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-1 %s 32766 = %d, want -32767", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-1 %s 32767 = %d, want -32768", "-", r)
+ }
+ x = 0
+ y = -32768
+ r = x - y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32767 {
+ t.Errorf("0 %s -32767 = %d, want 32767", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32766 {
+ t.Errorf("0 %s 32766 = %d, want -32766", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32767 {
+ t.Errorf("0 %s 32767 = %d, want -32767", "-", r)
+ }
+ x = 1
+ y = -32768
+ r = x - y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("1 %s -32767 = %d, want -32768", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32765 {
+ t.Errorf("1 %s 32766 = %d, want -32765", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32766 {
+ t.Errorf("1 %s 32767 = %d, want -32766", "-", r)
+ }
+ x = 32766
+ y = -32768
+ r = x - y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -3 {
+ t.Errorf("32766 %s -32767 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32766 %s -1 = %d, want 32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32765 {
+ t.Errorf("32766 %s 1 = %d, want 32765", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("32766 %s 32767 = %d, want -1", "-", r)
+ }
+ x = 32767
+ y = -32768
+ r = x - y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -2 {
+ t.Errorf("32767 %s -32767 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("32767 %s -1 = %d, want -32768", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32767 %s 1 = %d, want 32766", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "-", r)
+ }
+}
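+// TestConstFoldint16div includes the overflow case -32768 / -1, which wraps to -32768 as specified for two's-complement integer division in Go.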
+func TestConstFoldint16div(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32768 = %d, want 1", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32766
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32766 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint16mul(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -32767 = %d, want -32768", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 32767 = %d, want -32768", "*", r)
+ }
+ x = -32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("-32767 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-1 %s -32767 = %d, want 32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("-1 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-1 %s 32767 = %d, want -32767", "*", r)
+ }
+ x = 0
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("1 %s -32767 = %d, want -32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("1 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("1 %s 32767 = %d, want 32767", "*", r)
+ }
+ x = 32766
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 4 {
+ t.Errorf("32766 %s 32766 = %d, want 4", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s 32767 = %d, want -32766", "*", r)
+ }
+ x = 32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32767 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint16mod(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "%", r)
+ }
+ x = -32767
+ y = -32768
+ r = x % y
+ if r != -32767 {
+ t.Errorf("-32767 %s -32768 = %d, want -32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -32768
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32768 = %d, want -1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32767 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -32768
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32768 = %d, want 1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32767 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32767 = %d, want 1", "%", r)
+ }
+ x = 32766
+ y = -32768
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32768 = %d, want 32766", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s 32767 = %d, want 32766", "%", r)
+ }
+ x = 32767
+ y = -32768
+ r = x % y
+ if r != 32767 {
+ t.Errorf("32767 %s -32768 = %d, want 32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "%", r)
+ }
+}
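+// The uint8 tests use the boundary values 0, 1, and 255; arithmetic wraps modulo 2^8.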
+func TestConstFolduint8add(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 255 {
+ t.Errorf("0 %s 255 = %d, want 255", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "+", r)
+ }
+ x = 255
+ y = 0
+ r = x + y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 254 {
+ t.Errorf("255 %s 255 = %d, want 254", "+", r)
+ }
+}
+func TestConstFolduint8sub(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 255 {
+ t.Errorf("0 %s 1 = %d, want 255", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 255 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 255 = %d, want 2", "-", r)
+ }
+ x = 255
+ y = 0
+ r = x - y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint8div(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "/", r)
+ }
+ x = 255
+ y = 1
+ r = x / y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint8mul(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 255 {
+ t.Errorf("1 %s 255 = %d, want 255", "*", r)
+ }
+ x = 255
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("255 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint8mod(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 255 = %d, want 1", "%", r)
+ }
+ x = 255
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "%", r)
+ }
+}
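+// The int8 tests use the boundary values -128, -127, -1, 0, 1, 126, and 127.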
+func TestConstFoldint8add(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x + y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 127 {
+ t.Errorf("-128 %s -1 = %d, want 127", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -127 {
+ t.Errorf("-128 %s 1 = %d, want -127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "+", r)
+ }
+ x = -127
+ y = -128
+ r = x + y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 2 {
+ t.Errorf("-127 %s -127 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -128 {
+ t.Errorf("-127 %s -1 = %d, want -128", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -126 {
+ t.Errorf("-127 %s 1 = %d, want -126", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -128
+ r = x + y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -128 {
+ t.Errorf("-1 %s -127 = %d, want -128", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 125 {
+ t.Errorf("-1 %s 126 = %d, want 125", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 126 {
+ t.Errorf("-1 %s 127 = %d, want 126", "+", r)
+ }
+ x = 0
+ y = -128
+ r = x + y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -127 {
+ t.Errorf("0 %s -127 = %d, want -127", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 126 {
+ t.Errorf("0 %s 126 = %d, want 126", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 127 {
+ t.Errorf("0 %s 127 = %d, want 127", "+", r)
+ }
+ x = 1
+ y = -128
+ r = x + y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -126 {
+ t.Errorf("1 %s -127 = %d, want -126", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 127 {
+ t.Errorf("1 %s 126 = %d, want 127", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -128 {
+ t.Errorf("1 %s 127 = %d, want -128", "+", r)
+ }
+ x = 126
+ y = -128
+ r = x + y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -1 {
+ t.Errorf("126 %s -127 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 125 {
+ t.Errorf("126 %s -1 = %d, want 125", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 127 {
+ t.Errorf("126 %s 1 = %d, want 127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -4 {
+ t.Errorf("126 %s 126 = %d, want -4", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -3 {
+ t.Errorf("126 %s 127 = %d, want -3", "+", r)
+ }
+ x = 127
+ y = -128
+ r = x + y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 126 {
+ t.Errorf("127 %s -1 = %d, want 126", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -128 {
+ t.Errorf("127 %s 1 = %d, want -128", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -3 {
+ t.Errorf("127 %s 126 = %d, want -3", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -2 {
+ t.Errorf("127 %s 127 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint8sub(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x - y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -127 {
+ t.Errorf("-128 %s -1 = %d, want -127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 127 {
+ t.Errorf("-128 %s 1 = %d, want 127", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 2 {
+ t.Errorf("-128 %s 126 = %d, want 2", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 1 {
+ t.Errorf("-128 %s 127 = %d, want 1", "-", r)
+ }
+ x = -127
+ y = -128
+ r = x - y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -126 {
+ t.Errorf("-127 %s -1 = %d, want -126", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -128 {
+ t.Errorf("-127 %s 1 = %d, want -128", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 3 {
+ t.Errorf("-127 %s 126 = %d, want 3", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 2 {
+ t.Errorf("-127 %s 127 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -128
+ r = x - y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 126 {
+ t.Errorf("-1 %s -127 = %d, want 126", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -127 {
+ t.Errorf("-1 %s 126 = %d, want -127", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -128 {
+ t.Errorf("-1 %s 127 = %d, want -128", "-", r)
+ }
+ x = 0
+ y = -128
+ r = x - y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 127 {
+ t.Errorf("0 %s -127 = %d, want 127", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -126 {
+ t.Errorf("0 %s 126 = %d, want -126", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -127 {
+ t.Errorf("0 %s 127 = %d, want -127", "-", r)
+ }
+ x = 1
+ y = -128
+ r = x - y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -128 {
+ t.Errorf("1 %s -127 = %d, want -128", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -125 {
+ t.Errorf("1 %s 126 = %d, want -125", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -126 {
+ t.Errorf("1 %s 127 = %d, want -126", "-", r)
+ }
+ x = 126
+ y = -128
+ r = x - y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -3 {
+ t.Errorf("126 %s -127 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 127 {
+ t.Errorf("126 %s -1 = %d, want 127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 125 {
+ t.Errorf("126 %s 1 = %d, want 125", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -1 {
+ t.Errorf("126 %s 127 = %d, want -1", "-", r)
+ }
+ x = 127
+ y = -128
+ r = x - y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -2 {
+ t.Errorf("127 %s -127 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -128 {
+ t.Errorf("127 %s -1 = %d, want -128", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 126 {
+ t.Errorf("127 %s 1 = %d, want 126", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint8div(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -128 = %d, want 1", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "/", r)
+ }
+ x = -127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 126
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("126 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s 127 = %d, want 0", "/", r)
+ }
+ x = 127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint8mul(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -127 = %d, want -128", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 127 = %d, want -128", "*", r)
+ }
+ x = -127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("-127 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 127 {
+ t.Errorf("-1 %s -127 = %d, want 127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("-1 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -127 {
+ t.Errorf("-1 %s 127 = %d, want -127", "*", r)
+ }
+ x = 0
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -127 {
+ t.Errorf("1 %s -127 = %d, want -127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("1 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 127 {
+ t.Errorf("1 %s 127 = %d, want 127", "*", r)
+ }
+ x = 126
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 4 {
+ t.Errorf("126 %s 126 = %d, want 4", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s 127 = %d, want -126", "*", r)
+ }
+ x = 127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("127 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint8mod(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "%", r)
+ }
+ x = -127
+ y = -128
+ r = x % y
+ if r != -127 {
+ t.Errorf("-127 %s -128 = %d, want -127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -128
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -128 = %d, want -1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 127 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -128
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -128 = %d, want 1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -127 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 127 = %d, want 1", "%", r)
+ }
+ x = 126
+ y = -128
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -128 = %d, want 126", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s 127 = %d, want 126", "%", r)
+ }
+ x = 127
+ y = -128
+ r = x % y
+ if r != 127 {
+ t.Errorf("127 %s -128 = %d, want 127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "%", r)
+ }
+}
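+// Shift tests: uint64 operand shifted left/right by uint64, uint32, uint16,
+// and uint8 counts at boundary values (0, 1, 2^32, 2^64-1); counts of 64 or
+// more must fold to 0.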
+func TestConstFolduint64uint64lsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint64rsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint32lsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint32rsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint16lsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint16rsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint8lsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint8rsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", ">>", r)
+ }
+}
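+// Shift tests: int64 operand shifted by each unsigned count type. Left shifts
+// discard high bits; right shifts are arithmetic, so counts of 64 or more
+// fold to -1 for negative operands and 0 otherwise.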
+func TestConstFoldint64uint64lsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint64rsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint32lsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint32rsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint16lsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint16rsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint8lsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint8rsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", ">>", r)
+ }
+}
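+// Shift tests: uint32 operand at boundary values (0, 1, 2^32-1); counts of 32
+// or more must fold to 0.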
+func TestConstFolduint32uint64lsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint64rsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint32lsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint32rsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint16lsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint16rsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint8lsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint8rsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", ">>", r)
+ }
+}
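+// Shift tests: int32 operand; right shifts are arithmetic, so counts of 32 or
+// more fold to -1 for negative operands and 0 otherwise.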
+func TestConstFoldint32uint64lsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint64rsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint32lsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint32rsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint16lsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint16rsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint8lsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint8rsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint64lsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint64rsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint32lsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint32rsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint16lsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint16rsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint8lsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint8rsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint64lsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint64rsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint32lsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint32rsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint16lsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint16rsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint8lsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint8rsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint64lsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint64rsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint32lsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint32rsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint16lsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint16rsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint8lsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint8rsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint64lsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint64rsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint32lsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint32rsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint16lsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint16rsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint8lsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint8rsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldCompareuint64(t *testing.T) {
+ {
+ var x uint64 = 0
+ var y uint64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 18446744073709551615
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint64(t *testing.T) {
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775808
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775806
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint32(t *testing.T) {
+ {
+ var x uint32 = 0
+ var y uint32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 4294967295
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint32(t *testing.T) {
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483648
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint16(t *testing.T) {
+ {
+ var x uint16 = 0
+ var y uint16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 65535
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint16(t *testing.T) {
+ {
+ var x int16 = -32768
+ var y int16 = -32768
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32766
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint8(t *testing.T) {
+ {
+ var x uint8 = 0
+ var y uint8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 255
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint8(t *testing.T) {
+ {
+ var x int8 = -128
+ var y int8 = -128
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 126
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
new file mode 100644
index 0000000..6e90eb4
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -0,0 +1,1185 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// Declaration stack & operations
+
+var externdcl []*Node
+
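+// testdclstack reports an internal compiler error if a scope mark has
+// been left on the declaration stack.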
+func testdclstack() {
+ if !types.IsDclstackValid() {
+ if nerrors != 0 {
+ errorexit()
+ }
+ Fatalf("mark left on the dclstack")
+ }
+}
+
+// redeclare emits a diagnostic about symbol s being redeclared at pos.
+func redeclare(pos src.XPos, s *types.Sym, where string) {
+ if !s.Lastlineno.IsKnown() {
+ pkg := s.Origpkg
+ if pkg == nil {
+ pkg = s.Pkg
+ }
+ yyerrorl(pos, "%v redeclared %s\n"+
+ "\tprevious declaration during import %q", s, where, pkg.Path)
+ } else {
+ prevPos := s.Lastlineno
+
+ // When an import and a declaration collide in separate files,
+ // present the import as the "redeclared", because the declaration
+ // is visible where the import is, but not vice versa.
+ // See issue 4510.
+ if s.Def == nil {
+ pos, prevPos = prevPos, pos
+ }
+
+ yyerrorl(pos, "%v redeclared %s\n"+
+ "\tprevious declaration at %v", s, where, linestr(prevPos))
+ }
+}
+
+var vargen int
+
+// declare individual names - var, typ, const
+
+var declare_typegen int
+
+// declare records that Node n declares symbol n.Sym in the specified
+// declaration context.
+func declare(n *Node, ctxt Class) {
+ if n.isBlank() {
+ return
+ }
+
+ if n.Name == nil {
+ // named OLITERAL needs Name; most OLITERALs don't.
+ n.Name = new(Name)
+ }
+
+ s := n.Sym
+
+	// kludgy: typecheckok means we're past parsing. E.g., genwrapper may declare out-of-package names later.
+ if !inimport && !typecheckok && s.Pkg != localpkg {
+ yyerrorl(n.Pos, "cannot declare name %v", s)
+ }
+
+ gen := 0
+ if ctxt == PEXTERN {
+ if s.Name == "init" {
+ yyerrorl(n.Pos, "cannot declare init - must be func")
+ }
+ if s.Name == "main" && s.Pkg.Name == "main" {
+ yyerrorl(n.Pos, "cannot declare main - must be func")
+ }
+ externdcl = append(externdcl, n)
+ } else {
+ if Curfn == nil && ctxt == PAUTO {
+ lineno = n.Pos
+ Fatalf("automatic outside function")
+ }
+ if Curfn != nil && ctxt != PFUNC {
+ Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ }
+ if n.Op == OTYPE {
+ declare_typegen++
+ gen = declare_typegen
+ } else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+ vargen++
+ gen = vargen
+ }
+ types.Pushdcl(s)
+ n.Name.Curfn = Curfn
+ }
+
+ if ctxt == PAUTO {
+ n.Xoffset = 0
+ }
+
+ if s.Block == types.Block {
+ // functype will print errors about duplicate function arguments.
+ // Don't repeat the error here.
+ if ctxt != PPARAM && ctxt != PPARAMOUT {
+ redeclare(n.Pos, s, "in this block")
+ }
+ }
+
+ s.Block = types.Block
+ s.Lastlineno = lineno
+ s.Def = asTypesNode(n)
+ n.Name.Vargen = int32(gen)
+ n.SetClass(ctxt)
+ if ctxt == PFUNC {
+ n.Sym.SetFunc(true)
+ }
+
+ autoexport(n, ctxt)
+}
+
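+// addvar changes n into an ONAME node, declares it in context ctxt,
+// and gives it type t.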
+func addvar(n *Node, t *types.Type, ctxt Class) {
+ if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
+ Fatalf("addvar: n=%v t=%v nil", n, t)
+ }
+
+ n.Op = ONAME
+ declare(n, ctxt)
+ n.Type = t
+}
+
+// declare variables from grammar
+// new_name_list (type | [type] = expr_list)
+func variter(vl []*Node, t *Node, el []*Node) []*Node {
+ var init []*Node
+ doexpr := len(el) > 0
+
+ if len(el) == 1 && len(vl) > 1 {
+ e := el[0]
+ as2 := nod(OAS2, nil, nil)
+ as2.List.Set(vl)
+ as2.Rlist.Set1(e)
+ for _, v := range vl {
+ v.Op = ONAME
+ declare(v, dclcontext)
+ v.Name.Param.Ntype = t
+ v.Name.Defn = as2
+ if Curfn != nil {
+ init = append(init, nod(ODCL, v, nil))
+ }
+ }
+
+ return append(init, as2)
+ }
+
+ nel := len(el)
+ for _, v := range vl {
+ var e *Node
+ if doexpr {
+ if len(el) == 0 {
+ yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ break
+ }
+ e = el[0]
+ el = el[1:]
+ }
+
+ v.Op = ONAME
+ declare(v, dclcontext)
+ v.Name.Param.Ntype = t
+
+ if e != nil || Curfn != nil || v.isBlank() {
+ if Curfn != nil {
+ init = append(init, nod(ODCL, v, nil))
+ }
+ e = nod(OAS, v, e)
+ init = append(init, e)
+ if e.Right != nil {
+ v.Name.Defn = e
+ }
+ }
+ }
+
+ if len(el) != 0 {
+ yyerror("assignment mismatch: %d variables but %d values", len(vl), nel)
+ }
+ return init
+}
+
+// newnoname returns a new ONONAME Node associated with symbol s.
+func newnoname(s *types.Sym) *Node {
+ if s == nil {
+ Fatalf("newnoname nil")
+ }
+ n := nod(ONONAME, nil, nil)
+ n.Sym = s
+ n.Xoffset = 0
+ return n
+}
+
+// newfuncnamel generates a new name node for a function or method.
+// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
+func newfuncnamel(pos src.XPos, s *types.Sym) *Node {
+ n := newnamel(pos, s)
+ n.Func = new(Func)
+ n.Func.SetIsHiddenClosure(Curfn != nil)
+ return n
+}
+
+// dclname generates a new name node for a name
+// being declared.
+func dclname(s *types.Sym) *Node {
+ n := newname(s)
+ n.Op = ONONAME // caller will correct it
+ return n
+}
+
+func typenod(t *types.Type) *Node {
+ return typenodl(src.NoXPos, t)
+}
+
+func typenodl(pos src.XPos, t *types.Type) *Node {
+ // if we copied another type with *t = *u
+ // then t->nod might be out of date, so
+ // check t->nod->type too
+ if asNode(t.Nod) == nil || asNode(t.Nod).Type != t {
+ t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil))
+ asNode(t.Nod).Type = t
+ asNode(t.Nod).Sym = t.Sym
+ }
+
+ return asNode(t.Nod)
+}
+
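+// anonfield, namedfield, and symfield construct ODCLFIELD nodes that
+// describe struct fields, interface methods, and function parameters.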
+func anonfield(typ *types.Type) *Node {
+ return symfield(nil, typ)
+}
+
+func namedfield(s string, typ *types.Type) *Node {
+ return symfield(lookup(s), typ)
+}
+
+func symfield(s *types.Sym, typ *types.Type) *Node {
+ n := nodSym(ODCLFIELD, nil, s)
+ n.Type = typ
+ return n
+}
+
+// oldname returns the Node that declares symbol s in the current scope.
+// If no such Node currently exists, an ONONAME Node is returned instead.
+// Automatically creates a new closure variable if the referenced symbol was
+// declared in a different (containing) function.
+func oldname(s *types.Sym) *Node {
+ n := asNode(s.Def)
+ if n == nil {
+ // Maybe a top-level declaration will come along later to
+ // define s. resolve will check s.Def again once all input
+ // source has been processed.
+ return newnoname(s)
+ }
+
+ if Curfn != nil && n.Op == ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
+ // Inner func is referring to var in outer func.
+ //
+ // TODO(rsc): If there is an outer variable x and we
+ // are parsing x := 5 inside the closure, until we get to
+ // the := it looks like a reference to the outer x so we'll
+ // make x a closure variable unnecessarily.
+ c := n.Name.Param.Innermost
+ if c == nil || c.Name.Curfn != Curfn {
+ // Do not have a closure var for the active closure yet; make one.
+ c = newname(s)
+ c.SetClass(PAUTOHEAP)
+ c.Name.SetIsClosureVar(true)
+ c.SetIsDDD(n.IsDDD())
+ c.Name.Defn = n
+
+ // Link into list of active closure variables.
+ // Popped from list in func funcLit.
+ c.Name.Param.Outer = n.Name.Param.Innermost
+ n.Name.Param.Innermost = c
+
+ Curfn.Func.Cvars.Append(c)
+ }
+
+ // return ref to closure var, not original
+ return c
+ }
+
+ return n
+}
+
+// importName is like oldname, but it reports an error if sym is from another package and not exported.
+func importName(sym *types.Sym) *Node {
+ n := oldname(sym)
+ if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
+ n.SetDiag(true)
+ yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+ }
+ return n
+}
+
+// := declarations
+func colasname(n *Node) bool {
+ switch n.Op {
+ case ONAME,
+ ONONAME,
+ OPACK,
+ OTYPE,
+ OLITERAL:
+ return n.Sym != nil
+ }
+
+ return false
+}
+
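+// colasdefn declares the new names on the left side of the := assignment
+// defn, reporting errors for non-names, repeated names, and assignments
+// that introduce no new variables.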
+func colasdefn(left []*Node, defn *Node) {
+ for _, n := range left {
+ if n.Sym != nil {
+ n.Sym.SetUniq(true)
+ }
+ }
+
+ var nnew, nerr int
+ for i, n := range left {
+ if n.isBlank() {
+ continue
+ }
+ if !colasname(n) {
+ yyerrorl(defn.Pos, "non-name %v on left side of :=", n)
+ nerr++
+ continue
+ }
+
+ if !n.Sym.Uniq() {
+ yyerrorl(defn.Pos, "%v repeated on left side of :=", n.Sym)
+ n.SetDiag(true)
+ nerr++
+ continue
+ }
+
+ n.Sym.SetUniq(false)
+ if n.Sym.Block == types.Block {
+ continue
+ }
+
+ nnew++
+ n = newname(n.Sym)
+ declare(n, dclcontext)
+ n.Name.Defn = defn
+ defn.Ninit.Append(nod(ODCL, n, nil))
+ left[i] = n
+ }
+
+ if nnew == 0 && nerr == 0 {
+ yyerrorl(defn.Pos, "no new variables on left side of :=")
+ }
+}
+
+// declare the arguments in an
+// interface field declaration.
+func ifacedcl(n *Node) {
+ if n.Op != ODCLFIELD || n.Left == nil {
+ Fatalf("ifacedcl")
+ }
+
+ if n.Sym.IsBlank() {
+ yyerror("methods must have a unique non-blank name")
+ }
+}
+
+// declare the function proper
+// and declare the arguments.
+// called in extern-declaration context
+// returns in auto-declaration context.
+func funchdr(n *Node) {
+ // change the declaration context from extern to auto
+ funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
+ Curfn = n
+ dclcontext = PAUTO
+
+ types.Markdcl()
+
+ if n.Func.Nname != nil {
+ funcargs(n.Func.Nname.Name.Param.Ntype)
+ } else if n.Func.Ntype != nil {
+ funcargs(n.Func.Ntype)
+ } else {
+ funcargs2(n.Type)
+ }
+}
+
+func funcargs(nt *Node) {
+ if nt.Op != OTFUNC {
+ Fatalf("funcargs %v", nt.Op)
+ }
+
+ // re-start the variable generation number
+ // we want to use small numbers for the return variables,
+ // so let them have the chunk starting at 1.
+ //
+ // TODO(mdempsky): This is ugly, and only necessary because
+ // esc.go uses Vargen to figure out result parameters' index
+ // within the result tuple.
+ vargen = nt.Rlist.Len()
+
+ // declare the receiver and in arguments.
+ if nt.Left != nil {
+ funcarg(nt.Left, PPARAM)
+ }
+ for _, n := range nt.List.Slice() {
+ funcarg(n, PPARAM)
+ }
+
+ oldvargen := vargen
+ vargen = 0
+
+ // declare the out arguments.
+ gen := nt.List.Len()
+ for _, n := range nt.Rlist.Slice() {
+ if n.Sym == nil {
+ // Name so that escape analysis can track it. ~r stands for 'result'.
+ n.Sym = lookupN("~r", gen)
+ gen++
+ }
+ if n.Sym.IsBlank() {
+ // Give it a name so we can assign to it during return. ~b stands for 'blank'.
+ // The name must be different from ~r above because if you have
+ // func f() (_ int)
+ // func g() int
+ // f is allowed to use a plain 'return' with no arguments, while g is not.
+ // So the two cases must be distinguished.
+ n.Sym = lookupN("~b", gen)
+ gen++
+ }
+
+ funcarg(n, PPARAMOUT)
+ }
+
+ vargen = oldvargen
+}
+
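+// funcarg declares the parameter n (an ODCLFIELD) in context ctxt and
+// assigns it the next variable generation number.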
+func funcarg(n *Node, ctxt Class) {
+ if n.Op != ODCLFIELD {
+ Fatalf("funcarg %v", n.Op)
+ }
+ if n.Sym == nil {
+ return
+ }
+
+ n.Right = newnamel(n.Pos, n.Sym)
+ n.Right.Name.Param.Ntype = n.Left
+ n.Right.SetIsDDD(n.IsDDD())
+ declare(n.Right, ctxt)
+
+ vargen++
+ n.Right.Name.Vargen = int32(vargen)
+}
+
+// Same as funcargs, except run over an already constructed TFUNC.
+// This happens during import, where the hidden_fndcl rule has
+// used functype directly to parse the function's type.
+func funcargs2(t *types.Type) {
+ if t.Etype != TFUNC {
+ Fatalf("funcargs2 %v", t)
+ }
+
+ for _, f := range t.Recvs().Fields().Slice() {
+ funcarg2(f, PPARAM)
+ }
+ for _, f := range t.Params().Fields().Slice() {
+ funcarg2(f, PPARAM)
+ }
+ for _, f := range t.Results().Fields().Slice() {
+ funcarg2(f, PPARAMOUT)
+ }
+}
+
+func funcarg2(f *types.Field, ctxt Class) {
+ if f.Sym == nil {
+ return
+ }
+ n := newnamel(f.Pos, f.Sym)
+ f.Nname = asTypesNode(n)
+ n.Type = f.Type
+ n.SetIsDDD(f.IsDDD())
+ declare(n, ctxt)
+}
+
+var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
+
+type funcStackEnt struct {
+ curfn *Node
+ dclcontext Class
+}
+
+// finish the body.
+// called in auto-declaration context.
+// returns in extern-declaration context.
+func funcbody() {
+ // change the declaration context from auto to previous context
+ types.Popdcl()
+ var e funcStackEnt
+ funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
+ Curfn, dclcontext = e.curfn, e.dclcontext
+}
+
+// structs, functions, and methods.
+// they don't belong here, but where do they belong?
+func checkembeddedtype(t *types.Type) {
+ if t == nil {
+ return
+ }
+
+ if t.Sym == nil && t.IsPtr() {
+ t = t.Elem()
+ if t.IsInterface() {
+ yyerror("embedded type cannot be a pointer to interface")
+ }
+ }
+
+ if t.IsPtr() || t.IsUnsafePtr() {
+ yyerror("embedded type cannot be a pointer")
+ } else if t.Etype == TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ t.ForwardType().Embedlineno = lineno
+ }
+}
+
+func structfield(n *Node) *types.Field {
+ lno := lineno
+ lineno = n.Pos
+
+ if n.Op != ODCLFIELD {
+ Fatalf("structfield: oops %v\n", n)
+ }
+
+ f := types.NewField()
+ f.Pos = n.Pos
+ f.Sym = n.Sym
+
+ if n.Left != nil {
+ n.Left = typecheck(n.Left, ctxType)
+ n.Type = n.Left.Type
+ n.Left = nil
+ }
+
+ f.Type = n.Type
+ if f.Type == nil {
+ f.SetBroke(true)
+ }
+
+ if n.Embedded() {
+ checkembeddedtype(n.Type)
+ f.Embedded = 1
+ } else {
+ f.Embedded = 0
+ }
+
+	switch u := n.Val().U.(type) {
+	case string:
+		f.Note = u
+	case nil:
+		// no-op
+	default:
+		yyerror("field tag must be a string")
+	}
+
+ lineno = lno
+ return f
+}
+
+// checkdupfields emits errors for duplicate field or method names in
+// a list of struct or interface types.
+func checkdupfields(what string, fss ...[]*types.Field) {
+ seen := make(map[*types.Sym]bool)
+ for _, fs := range fss {
+ for _, f := range fs {
+ if f.Sym == nil || f.Sym.IsBlank() {
+ continue
+ }
+ if seen[f.Sym] {
+ yyerrorl(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+ continue
+ }
+ seen[f.Sym] = true
+ }
+ }
+}
+
+// convert a parsed id/type list into
+// a type for struct/interface/arglist
+func tostruct(l []*Node) *types.Type {
+ t := types.New(TSTRUCT)
+
+ fields := make([]*types.Field, len(l))
+ for i, n := range l {
+ f := structfield(n)
+ if f.Broke() {
+ t.SetBroke(true)
+ }
+ fields[i] = f
+ }
+ t.SetFields(fields)
+
+ checkdupfields("field", t.FieldSlice())
+
+ if !t.Broke() {
+ checkwidth(t)
+ }
+
+ return t
+}
+
+func tofunargs(l []*Node, funarg types.Funarg) *types.Type {
+ t := types.New(TSTRUCT)
+ t.StructType().Funarg = funarg
+
+ fields := make([]*types.Field, len(l))
+ for i, n := range l {
+ f := structfield(n)
+ f.SetIsDDD(n.IsDDD())
+ if n.Right != nil {
+ n.Right.Type = f.Type
+ f.Nname = asTypesNode(n.Right)
+ }
+ if f.Broke() {
+ t.SetBroke(true)
+ }
+ fields[i] = f
+ }
+ t.SetFields(fields)
+ return t
+}
+
+func tofunargsfield(fields []*types.Field, funarg types.Funarg) *types.Type {
+ t := types.New(TSTRUCT)
+ t.StructType().Funarg = funarg
+ t.SetFields(fields)
+ return t
+}
+
+func interfacefield(n *Node) *types.Field {
+ lno := lineno
+ lineno = n.Pos
+
+ if n.Op != ODCLFIELD {
+ Fatalf("interfacefield: oops %v\n", n)
+ }
+
+ if n.Val().Ctype() != CTxxx {
+ yyerror("interface method cannot have annotation")
+ }
+
+ // MethodSpec = MethodName Signature | InterfaceTypeName .
+ //
+ // If Sym != nil, then Sym is MethodName and Left is Signature.
+ // Otherwise, Left is InterfaceTypeName.
+
+ if n.Left != nil {
+ n.Left = typecheck(n.Left, ctxType)
+ n.Type = n.Left.Type
+ n.Left = nil
+ }
+
+ f := types.NewField()
+ f.Pos = n.Pos
+ f.Sym = n.Sym
+ f.Type = n.Type
+ if f.Type == nil {
+ f.SetBroke(true)
+ }
+
+ lineno = lno
+ return f
+}
+
+func tointerface(l []*Node) *types.Type {
+ if len(l) == 0 {
+ return types.Types[TINTER]
+ }
+ t := types.New(TINTER)
+ var fields []*types.Field
+ for _, n := range l {
+ f := interfacefield(n)
+ if f.Broke() {
+ t.SetBroke(true)
+ }
+ fields = append(fields, f)
+ }
+ t.SetInterface(fields)
+ return t
+}
+
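+// fakeRecv returns an ODCLFIELD node for the synthetic receiver used in
+// interface method declarations.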
+func fakeRecv() *Node {
+ return anonfield(types.FakeRecvType())
+}
+
+func fakeRecvField() *types.Field {
+ f := types.NewField()
+ f.Type = types.FakeRecvType()
+ return f
+}
+
+// isifacemethod reports whether (field) f is
+// an interface method. Such methods have the
+// special receiver type types.FakeRecvType().
+func isifacemethod(f *types.Type) bool {
+ return f.Recv().Type == types.FakeRecvType()
+}
+
+// turn a parsed function declaration into a type
+func functype(this *Node, in, out []*Node) *types.Type {
+ t := types.New(TFUNC)
+
+ var rcvr []*Node
+ if this != nil {
+ rcvr = []*Node{this}
+ }
+ t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
+ t.FuncType().Params = tofunargs(in, types.FunargParams)
+ t.FuncType().Results = tofunargs(out, types.FunargResults)
+
+ checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
+
+ if t.Recvs().Broke() || t.Results().Broke() || t.Params().Broke() {
+ t.SetBroke(true)
+ }
+
+ t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
+
+ return t
+}
+
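+// functypefield is like functype, but operates on already constructed
+// types.Fields instead of parsed ODCLFIELD nodes.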
+func functypefield(this *types.Field, in, out []*types.Field) *types.Type {
+ t := types.New(TFUNC)
+
+ var rcvr []*types.Field
+ if this != nil {
+ rcvr = []*types.Field{this}
+ }
+ t.FuncType().Receiver = tofunargsfield(rcvr, types.FunargRcvr)
+ t.FuncType().Params = tofunargsfield(in, types.FunargParams)
+ t.FuncType().Results = tofunargsfield(out, types.FunargResults)
+
+ t.FuncType().Outnamed = t.NumResults() > 0 && origSym(t.Results().Field(0).Sym) != nil
+
+ return t
+}
+
+// origSym returns the original symbol written by the user.
+func origSym(s *types.Sym) *types.Sym {
+ if s == nil {
+ return nil
+ }
+
+ if len(s.Name) > 1 && s.Name[0] == '~' {
+ switch s.Name[1] {
+ case 'r': // originally an unnamed result
+ return nil
+ case 'b': // originally the blank identifier _
+ // TODO(mdempsky): Does s.Pkg matter here?
+ return nblank.Sym
+ }
+ return s
+ }
+
+ if strings.HasPrefix(s.Name, ".anon") {
+ // originally an unnamed or _ name (see subr.go: structargs)
+ return nil
+ }
+
+ return s
+}
+
+// methodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func methodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+ sym := methodSymSuffix(recv, msym, "")
+ sym.SetFunc(true)
+ return sym
+}
+
+// methodSymSuffix is like methodSym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+ if msym.IsBlank() {
+ Fatalf("blank method name")
+ }
+
+ rsym := recv.Sym
+ if recv.IsPtr() {
+ if rsym != nil {
+ Fatalf("declared pointer receiver type: %v", recv)
+ }
+ rsym = recv.Elem().Sym
+ }
+
+ // Find the package the receiver type appeared in. For
+ // anonymous receiver types (i.e., anonymous structs with
+ // embedded fields), use the "go" pseudo-package instead.
+ rpkg := gopkg
+ if rsym != nil {
+ rpkg = rsym.Pkg
+ }
+
+ var b bytes.Buffer
+ if recv.IsPtr() {
+ // The parentheses aren't really necessary, but
+ // they're pretty traditional at this point.
+ fmt.Fprintf(&b, "(%-S)", recv)
+ } else {
+ fmt.Fprintf(&b, "%-S", recv)
+ }
+
+ // A particular receiver type may have multiple non-exported
+ // methods with the same name. To disambiguate them, include a
+ // package qualifier for names that came from a different
+ // package than the receiver type.
+ if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+ b.WriteString(".")
+ b.WriteString(msym.Pkg.Prefix)
+ }
+
+ b.WriteString(".")
+ b.WriteString(msym.Name)
+ b.WriteString(suffix)
+
+ return rpkg.LookupBytes(b.Bytes())
+}
+
+// Add a method, declared as a function.
+// - msym is the method symbol
+// - t is function type (with receiver)
+// Returns a pointer to the existing or added Field; or nil if there's an error.
+func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+ if msym == nil {
+ Fatalf("no method symbol")
+ }
+
+ // get parent type sym
+ rf := t.Recv() // ptr to this structure
+ if rf == nil {
+ yyerror("missing receiver")
+ return nil
+ }
+
+ mt := methtype(rf.Type)
+ if mt == nil || mt.Sym == nil {
+ pa := rf.Type
+ t := pa
+ if t != nil && t.IsPtr() {
+ if t.Sym != nil {
+ yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ return nil
+ }
+ t = t.Elem()
+ }
+
+ switch {
+ case t == nil || t.Broke():
+ // rely on typecheck having complained before
+ case t.Sym == nil:
+ yyerror("invalid receiver type %v (%v is not a defined type)", pa, t)
+ case t.IsPtr():
+ yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
+ case t.IsInterface():
+ yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
+ default:
+ // Should have picked off all the reasons above,
+ // but just in case, fall back to a generic error.
+ yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
+ }
+ return nil
+ }
+
+ if local && mt.Sym.Pkg != localpkg {
+ yyerror("cannot define new methods on non-local type %v", mt)
+ return nil
+ }
+
+ if msym.IsBlank() {
+ return nil
+ }
+
+ if mt.IsStruct() {
+ for _, f := range mt.Fields().Slice() {
+ if f.Sym == msym {
+ yyerror("type %v has both field and method named %v", mt, msym)
+ f.SetBroke(true)
+ return nil
+ }
+ }
+ }
+
+ for _, f := range mt.Methods().Slice() {
+ if msym.Name != f.Sym.Name {
+ continue
+ }
+ // types.Identical only checks that incoming and result parameters match,
+ // so explicitly check that the receiver parameters match too.
+ if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
+ yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+ }
+ return f
+ }
+
+ f := types.NewField()
+ f.Pos = lineno
+ f.Sym = msym
+ f.Type = t
+ f.SetNointerface(nointerface)
+
+ mt.Methods().Append(f)
+ return f
+}
+
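+// funcsymname returns the name of s's func value symbol, s·f.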
+func funcsymname(s *types.Sym) string {
+ return s.Name + "·f"
+}
+
+// funcsym returns s·f.
+func funcsym(s *types.Sym) *types.Sym {
+ // funcsymsmu here serves to protect not just mutations of funcsyms (below),
+ // but also the package lookup of the func sym name,
+ // since this function gets called concurrently from the backend.
+ // There are no other concurrent package lookups in the backend,
+ // except for the types package, which is protected separately.
+ // Reusing funcsymsmu to also cover this package lookup
+ // avoids a general, broader, expensive package lookup mutex.
+ // Note makefuncsym also does package look-up of func sym names,
+ // but it is only called serially, from the front end.
+ funcsymsmu.Lock()
+ sf, existed := s.Pkg.LookupOK(funcsymname(s))
+ // Don't export s·f when compiling for dynamic linking.
+ // When dynamically linking, the necessary function
+ // symbols will be created explicitly with makefuncsym.
+ // See the makefuncsym comment for details.
+ if !Ctxt.Flag_dynlink && !existed {
+ funcsyms = append(funcsyms, s)
+ }
+ funcsymsmu.Unlock()
+ return sf
+}
+
+// makefuncsym ensures that s·f is exported.
+// It is only used with -dynlink.
+// When not compiling for dynamic linking,
+// the funcsyms are created as needed by
+// the packages that use them.
+// Normally we emit the s·f stubs as DUPOK syms,
+// but DUPOK doesn't work across shared library boundaries.
+// So instead, when dynamic linking, we only create
+// the s·f stubs in s's package.
+func makefuncsym(s *types.Sym) {
+ if !Ctxt.Flag_dynlink {
+ Fatalf("makefuncsym dynlink")
+ }
+ if s.IsBlank() {
+ return
+ }
+ if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ // runtime.getg(), getclosureptr(), getcallerpc(), and
+ // getcallersp() are not real functions and so do not
+ // get funcsyms.
+ return
+ }
+ if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
+ funcsyms = append(funcsyms, s)
+ }
+}
+
+// setNodeNameFunc marks a node as a function.
+func setNodeNameFunc(n *Node) {
+ if n.Op != ONAME || n.Class() != Pxxx {
+ Fatalf("expected ONAME/Pxxx node, got %v", n)
+ }
+
+ n.SetClass(PFUNC)
+ n.Sym.SetFunc(true)
+}
+
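+// dclfunc creates and returns an ODCLFUNC node for a new function
+// named sym whose signature is given by the OTFUNC node tfn.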
+func dclfunc(sym *types.Sym, tfn *Node) *Node {
+ if tfn.Op != OTFUNC {
+ Fatalf("expected OTFUNC node, got %v", tfn)
+ }
+
+ fn := nod(ODCLFUNC, nil, nil)
+ fn.Func.Nname = newfuncnamel(lineno, sym)
+ fn.Func.Nname.Name.Defn = fn
+ fn.Func.Nname.Name.Param.Ntype = tfn
+ setNodeNameFunc(fn.Func.Nname)
+ funchdr(fn)
+ fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
+ return fn
+}
+
+type nowritebarrierrecChecker struct {
+ // extraCalls contains extra function calls that may not be
+ // visible during later analysis. It maps from the ODCLFUNC of
+ // the caller to a list of callees.
+ extraCalls map[*Node][]nowritebarrierrecCall
+
+ // curfn is the current function during AST walks.
+ curfn *Node
+}
+
+type nowritebarrierrecCall struct {
+ target *Node // ODCLFUNC of caller or callee
+ lineno src.XPos // line of call
+}
+
+type nowritebarrierrecCallSym struct {
+ target *obj.LSym // LSym of callee
+ lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before transformclosure and walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+ c := &nowritebarrierrecChecker{
+ extraCalls: make(map[*Node][]nowritebarrierrecCall),
+ }
+
+ // Find all systemstack calls and record their targets. In
+ // general, flow analysis can't see into systemstack, but it's
+ // important to handle it for this check, so we model it
+ // directly. This has to happen before transformclosure since
+ // it's a lot harder to work out the argument after.
+ for _, n := range xtop {
+ if n.Op != ODCLFUNC {
+ continue
+ }
+ c.curfn = n
+ inspect(n, c.findExtraCalls)
+ }
+ c.curfn = nil
+ return c
+}
+
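+// findExtraCalls records, for each call to runtime.systemstack, an
+// extra call edge from the enclosing function to the function value
+// passed to systemstack, since that call is otherwise invisible to
+// the later call-graph walk.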
+func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool {
+ if n.Op != OCALLFUNC {
+ return true
+ }
+ fn := n.Left
+ if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil {
+ return true
+ }
+ if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
+ return true
+ }
+
+ var callee *Node
+ arg := n.List.First()
+ switch arg.Op {
+ case ONAME:
+ callee = arg.Name.Defn
+ case OCLOSURE:
+ callee = arg.Func.Closure
+ default:
+ Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+ }
+ if callee.Op != ODCLFUNC {
+ Fatalf("expected ODCLFUNC node, got %+v", callee)
+ }
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
+ return true
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different from Nodes.
+func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) {
+ if from.Op != ODCLFUNC {
+ Fatalf("expected ODCLFUNC, got %v", from)
+ }
+ // We record this information on the *Func so this is
+ // concurrent-safe.
+ fn := from.Func
+ if fn.nwbrCalls == nil {
+ fn.nwbrCalls = new([]nowritebarrierrecCallSym)
+ }
+ *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos})
+}
+
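+// check performs the BFS over the call graph and reports any write
+// barrier that is reachable from a go:nowritebarrierrec function.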
+func (c *nowritebarrierrecChecker) check() {
+ // We walk the call graph as late as possible so we can
+ // capture all calls created by lowering, but this means we
+ // only get to see the obj.LSyms of calls. symToFunc lets us
+ // get back to the ODCLFUNCs.
+ symToFunc := make(map[*obj.LSym]*Node)
+ // funcs records the back-edges of the BFS call graph walk. It
+ // maps from the ODCLFUNC of each function that must not have
+ // write barriers to the call that inhibits them. Functions
+ // that are directly marked go:nowritebarrierrec are in this
+ // map with a zero-valued nowritebarrierrecCall. This also
+ // acts as the set of marks for the BFS of the call graph.
+ funcs := make(map[*Node]nowritebarrierrecCall)
+ // q is the queue of ODCLFUNC Nodes to visit in BFS order.
+ var q nodeQueue
+
+ for _, n := range xtop {
+ if n.Op != ODCLFUNC {
+ continue
+ }
+
+ symToFunc[n.Func.lsym] = n
+
+ // Make nowritebarrierrec functions BFS roots.
+ if n.Func.Pragma&Nowritebarrierrec != 0 {
+ funcs[n] = nowritebarrierrecCall{}
+ q.pushRight(n)
+ }
+ // Check go:nowritebarrier functions.
+ if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
+ yyerrorl(n.Func.WBPos, "write barrier prohibited")
+ }
+ }
+
+ // Perform a BFS of the call graph from all
+ // go:nowritebarrierrec functions.
+ enqueue := func(src, target *Node, pos src.XPos) {
+ if target.Func.Pragma&Yeswritebarrierrec != 0 {
+ // Don't flow into this function.
+ return
+ }
+ if _, ok := funcs[target]; ok {
+ // Already found a path to target.
+ return
+ }
+
+ // Record the path.
+ funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+ q.pushRight(target)
+ }
+ for !q.empty() {
+ fn := q.popLeft()
+
+ // Check fn.
+ if fn.Func.WBPos.IsKnown() {
+ var err bytes.Buffer
+ call := funcs[fn]
+ for call.target != nil {
+ fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname)
+ call = funcs[call.target]
+ }
+ yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
+ continue
+ }
+
+ // Enqueue fn's calls.
+ for _, callee := range c.extraCalls[fn] {
+ enqueue(fn, callee.target, callee.lineno)
+ }
+ if fn.Func.nwbrCalls == nil {
+ continue
+ }
+ for _, callee := range *fn.Func.nwbrCalls {
+ target := symToFunc[callee.target]
+ if target != nil {
+ enqueue(fn, target, callee.lineno)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/dep_test.go b/src/cmd/compile/internal/gc/dep_test.go
new file mode 100644
index 0000000..c1dac93
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dep_test.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/testenv"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestDeps(t *testing.T) {
+ out, err := exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.Deps}}", "cmd/compile/internal/gc").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
+ switch dep {
+ case "go/build", "go/token":
+ t.Errorf("undesired dependency on %q", dep)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/gc/dump.go
new file mode 100644
index 0000000..29eb1c1
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dump.go
@@ -0,0 +1,280 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements textual dumping of arbitrary data structures
+// for debugging purposes. The code is customized for Node graphs
+// and may be used for an alternative view of the node structure.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+)
+
+// dump is like fdump but prints to stderr.
+func dump(root interface{}, filter string, depth int) {
+ fdump(os.Stderr, root, filter, depth)
+}
+
+// fdump prints the structure of a rooted data structure
+// to w by depth-first traversal of the data structure.
+//
+// The filter parameter is a regular expression. If it is
+// non-empty, only struct fields whose names match filter
+// are printed.
+//
+// The depth parameter controls how deep traversal recurses
+// before it returns (higher value means greater depth).
+// If an empty field filter is given, a good depth default value
+// is 4. A negative depth means no depth limit, which may be fine
+// for small data structures or if there is a non-empty filter.
+//
+// In the output, Node structs are identified by their Op name
+// rather than their type; struct fields with zero values or
+// non-matching field names are omitted, and "…" means recursion
+// depth has been reached or struct fields have been omitted.
+func fdump(w io.Writer, root interface{}, filter string, depth int) {
+ if root == nil {
+ fmt.Fprintln(w, "nil")
+ return
+ }
+
+ if filter == "" {
+ filter = ".*" // default
+ }
+
+ p := dumper{
+ output: w,
+ fieldrx: regexp.MustCompile(filter),
+ ptrmap: make(map[uintptr]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ p.dump(reflect.ValueOf(root), depth)
+ p.printf("\n")
+}
+
+type dumper struct {
+ output io.Writer
+ fieldrx *regexp.Regexp // field name filter
+ ptrmap map[uintptr]int // ptr -> dump line number
+ lastadr string // last address string printed (for shortening)
+
+ // output
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// printf is a convenience wrapper.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(err)
+ }
+}
+
+// addr returns the (hexadecimal) address string of the object
+// represented by x (or "?" if x is not addressable), with the
+// common prefix between this and the prior address replaced by
+// "0x…" to make it easier to visually match addresses.
+func (p *dumper) addr(x reflect.Value) string {
+ if !x.CanAddr() {
+ return "?"
+ }
+ adr := fmt.Sprintf("%p", x.Addr().Interface())
+ s := adr
+ if i := commonPrefixLen(p.lastadr, adr); i > 0 {
+ s = "0x…" + adr[i:]
+ }
+ p.lastadr = adr
+ return s
+}
+
+// dump prints the contents of x.
+func (p *dumper) dump(x reflect.Value, depth int) {
+ if depth == 0 {
+ p.printf("…")
+ return
+ }
+
+ // special cases
+ switch v := x.Interface().(type) {
+ case Nodes:
+ // unpack Nodes since reflect cannot look inside
+ // due to the unexported field in its struct
+ x = reflect.ValueOf(v.Slice())
+
+ case src.XPos:
+ p.printf("%s", linestr(v))
+ return
+
+ case *types.Node:
+ x = reflect.ValueOf(asNode(v))
+ }
+
+ switch x.Kind() {
+ case reflect.String:
+ p.printf("%q", x.Interface()) // print strings in quotes
+
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), depth-1)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ p.printf("*")
+ ptr := x.Pointer()
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(@%d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ p.dump(x.Elem(), depth) // don't count pointer indirection towards depth
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), depth-1)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ isNode := false
+ if n, ok := x.Interface().(Node); ok {
+ isNode = true
+ p.printf("%s %s {", n.Op.String(), p.addr(x))
+ } else {
+ p.printf("%s {", typ)
+ }
+ p.indent++
+
+ first := true
+ omitted := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; types.IsExported(name) {
+ if !p.fieldrx.MatchString(name) {
+ omitted = true
+ continue // field name not selected by filter
+ }
+
+ // special cases
+ if isNode && name == "Op" {
+ omitted = true
+ continue // Op field already printed for Nodes
+ }
+ x := x.Field(i)
+ if isZeroVal(x) {
+ omitted = true
+ continue // exclude zero-valued fields
+ }
+ if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
+ omitted = true
+ continue // exclude empty Nodes slices
+ }
+
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x, depth-1)
+ p.printf("\n")
+ }
+ }
+ if omitted {
+ p.printf("…\n")
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ p.printf("%v", x.Interface())
+ }
+}
+
+func isZeroVal(x reflect.Value) bool {
+ switch x.Kind() {
+ case reflect.Bool:
+ return !x.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() == 0
+ case reflect.String:
+ return x.String() == ""
+ case reflect.Interface, reflect.Ptr, reflect.Slice:
+ return x.IsNil()
+ }
+ return false
+}
+
+func commonPrefixLen(a, b string) (i int) {
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go
new file mode 100644
index 0000000..31d0768
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dwinl.go
@@ -0,0 +1,450 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// To identify variables by original source position.
+type varPos struct {
+ DeclName string
+ DeclFile string
+ DeclLine uint
+ DeclCol uint
+}
+
+// This is the main entry point for collection of raw material to
+// drive generation of DWARF "inlined subroutine" DIEs. See proposal
+// 22080 for more details and background info.
+func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
+ var inlcalls dwarf.InlCalls
+
+ if Debug_gendwarfinl != 0 {
+ Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ }
+
+ // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
+ imap := make(map[int]int)
+
+ // Walk progs to build up the InlCalls data structure
+ var prevpos src.XPos
+ for p := fnsym.Func().Text; p != nil; p = p.Link {
+ if p.Pos == prevpos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii >= 0 {
+ insertInlCall(&inlcalls, ii, imap)
+ }
+ prevpos = p.Pos
+ }
+
+ // This is used to partition DWARF vars by inline index. Vars not
+ // produced by the inliner will wind up in the vmap[0] entry.
+ vmap := make(map[int32][]*dwarf.Var)
+
+ // Now walk the dwarf vars and partition them based on whether they
+ // were produced by the inliner (dwv.InlIndex > 0) or were original
+ // vars/params from the function (dwv.InlIndex == 0).
+ for _, dwv := range dwVars {
+
+ vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv)
+
+ // Zero index => var was not produced by an inline
+ if dwv.InlIndex == 0 {
+ continue
+ }
+
+ // Look up index in our map, then tack the var in question
+ // onto the vars list for the correct inlined call.
+ ii := int(dwv.InlIndex) - 1
+ idx, ok := imap[ii]
+ if !ok {
+ // We can occasionally encounter a var produced by the
+ // inliner for which there is no remaining prog; add a new
+ // entry to the call list in this scenario.
+ idx = insertInlCall(&inlcalls, ii, imap)
+ }
+ inlcalls.Calls[idx].InlVars =
+ append(inlcalls.Calls[idx].InlVars, dwv)
+ }
+
+ // Post process the map above to assign child indices to vars.
+ //
+ // A given variable is treated differently depending on whether it
+ // is part of the top-level function (ii == 0) or if it was
+ // produced as a result of an inline (ii != 0).
+ //
+ // If a variable was not produced by an inline and its containing
+ // function was not inlined, then we just assign an ordering
+ // based on variable name.
+ //
+ // If a variable was not produced by an inline and its containing
+ // function was inlined, then we need to assign a child index
+ // based on the order of vars in the abstract function (in
+ // addition, those vars that don't appear in the abstract
+ // function, such as "~r1", are flagged as such).
+ //
+ // If a variable was produced by an inline, then we locate it in
+ // the pre-inlining decls for the target function and assign child
+ // index accordingly.
+ for ii, sl := range vmap {
+ var m map[varPos]int
+ if ii == 0 {
+ if !fnsym.WasInlined() {
+ for j, v := range sl {
+ v.ChildIndex = int32(j)
+ }
+ continue
+ }
+ m = makePreinlineDclMap(fnsym)
+ } else {
+ ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ m = makePreinlineDclMap(ifnlsym)
+ }
+
+ // Here we assign child indices to variables based on
+ // pre-inlined decls, and set the "IsInAbstract" flag
+ // appropriately. In addition: parameter and local variable
+ // names are given "middle dot" version numbers as part of
+ // writing them out to export data (see issue 4326). If DWARF
+ // inlined routine generation is turned on, we want to undo
+ // this versioning, since DWARF variables in question will be
+ // parented by the inlined routine and not the top-level
+ // caller.
+ synthCount := len(m)
+ for _, v := range sl {
+ canonName := unversion(v.Name)
+ vp := varPos{
+ DeclName: canonName,
+ DeclFile: v.DeclFile,
+ DeclLine: v.DeclLine,
+ DeclCol: v.DeclCol,
+ }
+ synthesized := strings.HasPrefix(v.Name, "~r") || canonName == "_" || strings.HasPrefix(v.Name, "~b")
+ if idx, found := m[vp]; found {
+ v.ChildIndex = int32(idx)
+ v.IsInAbstract = !synthesized
+ v.Name = canonName
+ } else {
+ // Variable can't be found in the pre-inline dcl list.
+ // In the top-level case (ii=0) this can happen
+ // because a composite variable was split into pieces,
+ // and we're looking at a piece. We can also see
+ // return temps (~r%d) that were created during
+ // lowering, or unnamed params ("_").
+ v.ChildIndex = int32(synthCount)
+ synthCount++
+ }
+ }
+ }
+
+ // Make a second pass through the progs to compute PC ranges for
+ // the various inlined calls.
+ start := int64(-1)
+ curii := -1
+ var prevp *obj.Prog
+ for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link {
+ if prevp != nil && p.Pos == prevp.Pos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii == curii {
+ continue
+ }
+ // Close out the current range
+ if start != -1 {
+ addRange(inlcalls.Calls, start, p.Pc, curii, imap)
+ }
+ // Begin new range
+ start = p.Pc
+ curii = ii
+ }
+ if start != -1 {
+ addRange(inlcalls.Calls, start, fnsym.Size, curii, imap)
+ }
+
+ // Issue 33188: if II foo is a child of II bar, then ensure that
+ // bar's ranges include the ranges of foo (the loop above will produce
+ // disjoint ranges).
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ unifyCallRanges(inlcalls, k)
+ }
+ }
+
+ // Debugging
+ if Debug_gendwarfinl != 0 {
+ dumpInlCalls(inlcalls)
+ dumpInlVars(dwVars)
+ }
+
+ // Perform a consistency check on inlined routine PC ranges
+ // produced by unifyCallRanges above. In particular, complain in
+ // cases where you have A -> B -> C (e.g. C is inlined into B, and
+ // B is inlined into A) and the ranges for B are not enclosed
+ // within the ranges for A, or C within B.
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ checkInlCall(fnsym.Name, inlcalls, fnsym.Size, k, -1)
+ }
+ }
+
+ return inlcalls
+}
+
+// Secondary hook for DWARF inlined subroutine generation. This is called
+// late in the compilation when it is determined that we need an
+// abstract function DIE for an inlined routine imported from a
+// previously compiled package.
+func genAbstractFunc(fn *obj.LSym) {
+ ifn := Ctxt.DwFixups.GetPrecursorFunc(fn)
+ if ifn == nil {
+ Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ return
+ }
+ if Debug_gendwarfinl != 0 {
+ Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ }
+ Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath)
+}
+
+// Undo any versioning performed when a name was written
+// out as part of export data.
+func unversion(name string) string {
+ if i := strings.Index(name, "·"); i > 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// Given a function that was inlined as part of the compilation, dig
+// up the pre-inlining DCL list for the function and create a map that
+// supports lookup of pre-inline dcl index, based on variable
+// position/name. NB: the recipe for computing variable pos/file/line
+// needs to be kept in sync with the similar code in gc.createSimpleVars
+// and related functions.
+func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
+ dcl := preInliningDcls(fnsym)
+ m := make(map[varPos]int)
+ for i, n := range dcl {
+ pos := Ctxt.InnermostPos(n.Pos)
+ vp := varPos{
+ DeclName: unversion(n.Sym.Name),
+ DeclFile: pos.RelFilename(),
+ DeclLine: pos.RelLine(),
+ DeclCol: pos.Col(),
+ }
+ if _, found := m[vp]; found {
+ // We can see collisions (variables with the same name/file/line/col)
+ // in obfuscated or machine-generated code -- see issue 44378 for an
+ // example. Skip duplicates in such cases, since it is unlikely that
+ // a human will be debugging such code.
+ continue
+ }
+ m[vp] = i
+ }
+ return m
+}
+
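+// insertInlCall returns the index into dwcalls.Calls for the inline
+// with inlining index inlIdx, creating entries for it (and,
+// recursively, its parents) if they are not already present.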
+func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
+ callIdx, found := imap[inlIdx]
+ if found {
+ return callIdx
+ }
+
+ // Haven't seen this inline yet. Visit parent of inline if there
+ // is one. We do this first so that parents appear before their
+ // children in the resulting table.
+ parCallIdx := -1
+ parInlIdx := Ctxt.InlTree.Parent(inlIdx)
+ if parInlIdx >= 0 {
+ parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
+ }
+
+ // Create new entry for this inline
+ inlinedFn := Ctxt.InlTree.InlinedFunction(inlIdx)
+ callXPos := Ctxt.InlTree.CallPos(inlIdx)
+ absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ pb := Ctxt.PosTable.Pos(callXPos).Base()
+ callFileSym := Ctxt.Lookup(pb.SymFilename())
+ ic := dwarf.InlCall{
+ InlIndex: inlIdx,
+ CallFile: callFileSym,
+ CallLine: uint32(callXPos.Line()),
+ AbsFunSym: absFnSym,
+ Root: parCallIdx == -1,
+ }
+ dwcalls.Calls = append(dwcalls.Calls, ic)
+ callIdx = len(dwcalls.Calls) - 1
+ imap[inlIdx] = callIdx
+
+ if parCallIdx != -1 {
+ // Add this inline to parent's child list
+ dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx)
+ }
+
+ return callIdx
+}
+
+// Given a src.XPos, return its associated inlining index if it
+// corresponds to something created as a result of an inline, or -1 if
+// there is no inline info. Note that the index returned will refer to
+// the deepest call in the inlined stack, e.g. if you have "A calls B
+// calls C calls D" and all three callees are inlined (B, C, and D),
+// the index for a node from the inlined body of D will refer to the
+// call to D from C. Whew.
+func posInlIndex(xpos src.XPos) int {
+ pos := Ctxt.PosTable.Pos(xpos)
+ if b := pos.Base(); b != nil {
+ ii := b.InliningIndex()
+ if ii >= 0 {
+ return ii
+ }
+ }
+ return -1
+}
+
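+// addRange appends the PC range [start,end) to the inlined call with
+// inlining index ii; it is a no-op if ii is -1 or the range is empty.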
+func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) {
+ if start == -1 {
+ panic("bad range start")
+ }
+ if end == -1 {
+ panic("bad range end")
+ }
+ if ii == -1 {
+ return
+ }
+ if start == end {
+ return
+ }
+ // Append range to correct inlined call
+ callIdx, found := imap[ii]
+ if !found {
+ Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+ }
+ call := &calls[callIdx]
+ call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
+}
+
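+// dumpInlCall logs a single inlined call (and, recursively, its
+// children), indented according to ilevel.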
+func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
+ for i := 0; i < ilevel; i++ {
+ Ctxt.Logf(" ")
+ }
+ ic := inlcalls.Calls[idx]
+ callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ for _, f := range ic.InlVars {
+ Ctxt.Logf(" %v", f.Name)
+ }
+ Ctxt.Logf(" ) C: (")
+ for _, k := range ic.Children {
+ Ctxt.Logf(" %v", k)
+ }
+ Ctxt.Logf(" ) R:")
+ for _, r := range ic.Ranges {
+ Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ }
+ Ctxt.Logf("\n")
+ for _, k := range ic.Children {
+ dumpInlCall(inlcalls, k, ilevel+1)
+ }
+
+}
+
+func dumpInlCalls(inlcalls dwarf.InlCalls) {
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ dumpInlCall(inlcalls, k, 0)
+ }
+ }
+}
+
+func dumpInlVars(dwvars []*dwarf.Var) {
+ for i, dwv := range dwvars {
+ typ := "local"
+ if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM {
+ typ = "param"
+ }
+ ia := 0
+ if dwv.IsInAbstract {
+ ia = 1
+ }
+ Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+ }
+}
+
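+// rangesContains reports whether rng is contained within one of the
+// ranges in par; if not, it also returns a description of the mismatch.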
+func rangesContains(par []dwarf.Range, rng dwarf.Range) (bool, string) {
+ for _, r := range par {
+ if rng.Start >= r.Start && rng.End <= r.End {
+ return true, ""
+ }
+ }
+ msg := fmt.Sprintf("range [%d,%d) not contained in {", rng.Start, rng.End)
+ for _, r := range par {
+ msg += fmt.Sprintf(" [%d,%d)", r.Start, r.End)
+ }
+ msg += " }"
+ return false, msg
+}
+
+func rangesContainsAll(parent, child []dwarf.Range) (bool, string) {
+ for _, r := range child {
+ c, m := rangesContains(parent, r)
+ if !c {
+ return false, m
+ }
+ }
+ return true, ""
+}
+
+// checkInlCall verifies that the PC ranges for inline info 'idx' are
+// enclosed/contained within the ranges of its parent inline (or if
+// this is a root/toplevel inline, checks that the ranges fall within
+// the extent of the top level function). A panic is issued if a
+// malformed range is found.
+func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, parentIdx int) {
+
+ // Callee
+ ic := inlCalls.Calls[idx]
+ callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+ calleeRanges := ic.Ranges
+
+ // Caller
+ caller := funcName
+ parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
+ if parentIdx != -1 {
+ pic := inlCalls.Calls[parentIdx]
+ caller = Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+ parentRanges = pic.Ranges
+ }
+
+ // Callee ranges contained in caller ranges?
+ c, m := rangesContainsAll(parentRanges, calleeRanges)
+ if !c {
+ Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+ }
+
+ // Now visit kids
+ for _, k := range ic.Children {
+ checkInlCall(funcName, inlCalls, funcSize, k, idx)
+ }
+}
+
+// unifyCallRanges ensures that the ranges for a given inline
+// transitively include all of the ranges for its child inlines.
+func unifyCallRanges(inlcalls dwarf.InlCalls, idx int) {
+ ic := &inlcalls.Calls[idx]
+ for _, childIdx := range ic.Children {
+ // First make sure child ranges are unified.
+ unifyCallRanges(inlcalls, childIdx)
+
+ // Then merge child ranges into ranges for this inline.
+ cic := inlcalls.Calls[childIdx]
+ ic.Ranges = dwarf.MergeRanges(ic.Ranges, cic.Ranges)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/embed.go b/src/cmd/compile/internal/gc/embed.go
new file mode 100644
index 0000000..f45796c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/embed.go
@@ -0,0 +1,256 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var embedlist []*Node
+
+var embedCfg struct {
+ Patterns map[string][]string
+ Files map[string]string
+}
+
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &embedCfg); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if embedCfg.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if embedCfg.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+const (
+ embedUnknown = iota
+ embedBytes
+ embedString
+ embedFiles
+)
+
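+// varEmbed checks that the //go:embed directives in embeds are valid
+// for the variable declaration (names, typ, exprs) and, if so, records
+// the embed patterns on the variable and adds it to embedlist.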
+func varEmbed(p *noder, names []*Node, typ *Node, exprs []*Node, embeds []PragmaEmbed) {
+ haveEmbed := false
+ for _, decl := range p.file.DeclList {
+ imp, ok := decl.(*syntax.ImportDecl)
+ if !ok {
+ // imports always come first
+ break
+ }
+ path, _ := strconv.Unquote(imp.Path.Value)
+ if path == "embed" {
+ haveEmbed = true
+ break
+ }
+ }
+
+ pos := embeds[0].Pos
+ if !haveEmbed {
+ p.yyerrorpos(pos, "invalid go:embed: missing import \"embed\"")
+ return
+ }
+ if len(names) > 1 {
+ p.yyerrorpos(pos, "go:embed cannot apply to multiple vars")
+ return
+ }
+ if len(exprs) > 0 {
+ p.yyerrorpos(pos, "go:embed cannot apply to var with initializer")
+ return
+ }
+ if typ == nil {
+ // Should not happen, since len(exprs) == 0 now.
+ p.yyerrorpos(pos, "go:embed cannot apply to var without type")
+ return
+ }
+ if dclcontext != PEXTERN {
+ p.yyerrorpos(pos, "go:embed cannot apply to var inside func")
+ return
+ }
+
+ var list []irEmbed
+ for _, e := range embeds {
+ list = append(list, irEmbed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
+ }
+ v := names[0]
+ v.Name.Param.SetEmbedList(list)
+ embedlist = append(embedlist, v)
+}
+
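+// embedFileList resolves the variable's //go:embed patterns against
+// the -embedcfg configuration and returns the sorted list of files
+// (plus, for embed.FS variables, their parent directories) to store.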
+func embedFileList(v *Node, kind int) []string {
+ // Build list of files to store.
+ have := make(map[string]bool)
+ var list []string
+ for _, e := range v.Name.Param.EmbedList() {
+ for _, pattern := range e.Patterns {
+ files, ok := embedCfg.Patterns[pattern]
+ if !ok {
+ yyerrorl(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ }
+ for _, file := range files {
+ if embedCfg.Files[file] == "" {
+ yyerrorl(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ continue
+ }
+ if !have[file] {
+ have[file] = true
+ list = append(list, file)
+ }
+ if kind == embedFiles {
+ for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+ have[dir] = true
+ list = append(list, dir+"/")
+ }
+ }
+ }
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return embedFileLess(list[i], list[j])
+ })
+
+ if kind == embedString || kind == embedBytes {
+ if len(list) > 1 {
+ yyerrorl(v.Pos, "invalid go:embed: multiple files for type %v", v.Type)
+ return nil
+ }
+ }
+
+ return list
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+ if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == localpkg && myimportpath == "embed")) {
+ return embedFiles
+ }
+ if typ.Etype == types.TSTRING {
+ return embedString
+ }
+ if typ.Etype == types.TSLICE && typ.Elem().Etype == types.TUINT8 {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
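+// embedFileNameSplit splits name into its directory and final element,
+// and reports whether name denotes a directory (ends in "/").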
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+ xdir, xelem, _ := embedFileNameSplit(x)
+ ydir, yelem, _ := embedFileNameSplit(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
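+// dumpembeds emits the initialization data for every //go:embed
+// variable recorded in embedlist.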
+func dumpembeds() {
+ for _, v := range embedlist {
+ initEmbed(v)
+ }
+}
+
+// initEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func initEmbed(v *Node) {
+ commentPos := v.Name.Param.EmbedList()[0].Pos
+ if !langSupported(1, 16, localpkg) {
+ lno := lineno
+ lineno = commentPos
+ yyerrorv("go1.16", "go:embed")
+ lineno = lno
+ return
+ }
+ if embedCfg.Patterns == nil {
+ yyerrorl(commentPos, "invalid go:embed: build system did not supply embed configuration")
+ return
+ }
+ kind := embedKind(v.Type)
+ if kind == embedUnknown {
+ yyerrorl(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+ return
+ }
+
+ files := embedFileList(v, kind)
+ switch kind {
+ case embedString, embedBytes:
+ file := files[0]
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], kind == embedString, nil)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ sym := v.Sym.Linksym()
+ off := 0
+ off = dsymptr(sym, off, fsym, 0) // data string
+ off = duintptr(sym, off, uint64(size)) // len
+ if kind == embedBytes {
+ duintptr(sym, off, uint64(size)) // cap for slice
+ }
+
+ case embedFiles:
+ slicedata := Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ off := 0
+ // []files pointed at by Files
+ off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
+ off = duintptr(slicedata, off, uint64(len(files)))
+ off = duintptr(slicedata, off, uint64(len(files)))
+
+ // embed/embed.go type file is:
+ // name string
+ // data string
+ // hash [16]byte
+ // Emit one of these per file in the set.
+ const hashSize = 16
+ hash := make([]byte, hashSize)
+ for _, file := range files {
+ off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = duintptr(slicedata, off, uint64(len(file)))
+ if strings.HasSuffix(file, "/") {
+ // entry for directory - no data
+ off = duintptr(slicedata, off, 0)
+ off = duintptr(slicedata, off, 0)
+ off += hashSize
+ } else {
+ fsym, size, err := fileStringSym(v.Pos, embedCfg.Files[file], true, hash)
+ if err != nil {
+ yyerrorl(v.Pos, "embed %s: %v", file, err)
+ }
+ off = dsymptr(slicedata, off, fsym, 0) // data string
+ off = duintptr(slicedata, off, uint64(size))
+ off = int(slicedata.WriteBytes(Ctxt, int64(off), hash))
+ }
+ }
+ ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ sym := v.Sym.Linksym()
+ dsymptr(sym, 0, slicedata, 0)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
new file mode 100644
index 0000000..6f328ab
--- /dev/null
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -0,0 +1,472 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
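+// escapes runs escape analysis over all of the functions in all,
+// visiting them bottom-up by static call graph.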
+func escapes(all []*Node) {
+ visitBottomUp(all, escapeFuncs)
+}
+
+const (
+ EscFuncUnknown = 0 + iota
+ EscFuncPlanned
+ EscFuncStarted
+ EscFuncTagged
+)
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
+
+// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
+func funcSym(fn *Node) *types.Sym {
+ if fn == nil || fn.Func.Nname == nil {
+ return nil
+ }
+ return fn.Func.Nname.Sym
+}
+
+// Mark labels that have no backjumps to them as not increasing e.loopdepth.
+// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
+// and set it to one of the following two. Then in esc we'll clear it again.
+var (
+ looping Node
+ nonlooping Node
+)
+
+func isSliceSelfAssign(dst, src *Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op {
+ case OSLICE, OSLICE3, OSLICESTR:
+ // OK.
+ case OSLICEARR, OSLICE3ARR:
+ // Since arrays are embedded into containing object,
+ // slice of non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ if src.Left.Op == OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dst.Left == src.Left.Left
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src *Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op != src.Op {
+ return false
+ }
+
+ switch dst.Op {
+ case ODOT, ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ case OINDEX:
+ if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
+ return false
+ }
+ default:
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ return samesafeexpr(dst.Left, src.Left)
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n *Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+ switch n.Op {
+ case ONAME, OCLOSUREVAR, OLITERAL:
+ return false
+
+ // Left+Right group.
+ case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+ return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
+
+ // Left group.
+ case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
+ ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
+ return mayAffectMemory(n.Left)
+
+ default:
+ return true
+ }
+}
+
+// heapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it doesn't.
+func heapAllocReason(n *Node) string {
+ if n.Type == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) {
+ return ""
+ }
+
+ if n.Type.Width > maxStackVarSize {
+ return "too large for stack"
+ }
+
+ if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op == OMAKESLICE {
+ r := n.Right
+ if r == nil {
+ r = n.Left
+ }
+ if !smallintconst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
+
+// addrescapes tags node n as having had its address taken
+// by "increasing" the "value" of n.Esc to EscHeap.
+// Storage is allocated as necessary to allow the address
+// to be taken.
+func addrescapes(n *Node) {
+ switch n.Op {
+ default:
+ // Unexpected Op, probably due to a previous type error. Ignore.
+
+ case ODEREF, ODOTPTR:
+ // Nothing to do.
+
+ case ONAME:
+ if n == nodfp {
+ break
+ }
+
+ // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+ // on PPARAM it means something different.
+ if n.Class() == PAUTO && n.Esc == EscNever {
+ break
+ }
+
+ // If a closure reference escapes, mark the outer variable as escaping.
+ if n.Name.IsClosureVar() {
+ addrescapes(n.Name.Defn)
+ break
+ }
+
+ if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
+ break
+ }
+
+ // This is a plain parameter or local variable that needs to move to the heap,
+ // but possibly for the function outside the one we're compiling.
+ // That is, if we have:
+ //
+ // func f(x int) {
+ // func() {
+ // global = &x
+ // }
+ // }
+ //
+ // then we're analyzing the inner closure but we need to move x to the
+ // heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
+ oldfn := Curfn
+ Curfn = n.Name.Curfn
+ if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
+ Curfn = Curfn.Func.Closure
+ }
+ ln := lineno
+ lineno = Curfn.Pos
+ moveToHeap(n)
+ Curfn = oldfn
+ lineno = ln
+
+ // ODOTPTR has already been introduced,
+ // so these are the non-pointer ODOT and OINDEX.
+ // In &x[0], if x is a slice, then x does not
+ // escape--the pointer inside x does, but that
+ // is always a heap pointer anyway.
+ case ODOT, OINDEX, OPAREN, OCONVNOP:
+ if !n.Left.Type.IsSlice() {
+ addrescapes(n.Left)
+ }
+ }
+}
+
+// moveToHeap records the parameter or local variable n as moved to the heap.
+func moveToHeap(n *Node) {
+ if Debug.r != 0 {
+ Dump("MOVE", n)
+ }
+ if compiling_runtime {
+ yyerror("%v escapes to heap, not allowed in runtime", n)
+ }
+ if n.Class() == PAUTOHEAP {
+ Dump("n", n)
+ Fatalf("double move to heap")
+ }
+
+ // Allocate a local stack variable to hold the pointer to the heap copy.
+ // temp will add it to the function declaration list automatically.
+ heapaddr := temp(types.NewPtr(n.Type))
+ heapaddr.Sym = lookup("&" + n.Sym.Name)
+ heapaddr.Orig.Sym = heapaddr.Sym
+ heapaddr.Pos = n.Pos
+
+ // Unset AutoTemp to persist the &foo variable name through SSA to
+ // liveness analysis.
+ // TODO(mdempsky/drchase): Cleaner solution?
+ heapaddr.Name.SetAutoTemp(false)
+
+ // Parameters have a local stack copy used at function start/end
+ // in addition to the copy in the heap that may live longer than
+ // the function.
+ if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ if n.Xoffset == BADWIDTH {
+ Fatalf("addrescapes before param assignment")
+ }
+
+ // We rewrite n below to be a heap variable (indirection of heapaddr).
+ // Preserve a copy so we can still write code referring to the original,
+ // and substitute that copy into the function declaration list
+ // so that analyses of the local (on-stack) variables use it.
+ stackcopy := newname(n.Sym)
+ stackcopy.Type = n.Type
+ stackcopy.Xoffset = n.Xoffset
+ stackcopy.SetClass(n.Class())
+ stackcopy.Name.Param.Heapaddr = heapaddr
+ if n.Class() == PPARAMOUT {
+ // Make sure the pointer to the heap copy is kept live throughout the function.
+ // The function could panic at any point, and then a defer could recover.
+ // Thus, we need the pointer to the heap copy always available so the
+ // post-deferreturn code can copy the return value back to the stack.
+ // See issue 16095.
+ heapaddr.Name.SetIsOutputParamHeapAddr(true)
+ }
+ n.Name.Param.Stackcopy = stackcopy
+
+ // Substitute the stackcopy into the function variable list so that
+ // liveness and other analyses use the underlying stack slot
+ // and not the now-pseudo-variable n.
+ found := false
+ for i, d := range Curfn.Func.Dcl {
+ if d == n {
+ Curfn.Func.Dcl[i] = stackcopy
+ found = true
+ break
+ }
+ // Parameters are before locals, so can stop early.
+ // This limits the search even in functions with many local variables.
+ if d.Class() == PAUTO {
+ break
+ }
+ }
+ if !found {
+ Fatalf("cannot find %v in local variable list", n)
+ }
+ Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ }
+
+ // Modify n in place so that uses of n now mean indirection of the heapaddr.
+ n.SetClass(PAUTOHEAP)
+ n.Xoffset = 0
+ n.Name.Param.Heapaddr = heapaddr
+ n.Esc = EscHeap
+ if Debug.m != 0 {
+ Warnl(n.Pos, "moved to heap: %v", n)
+ }
+}
+
+// This special tag is applied to uintptr variables
+// that we believe may hold unsafe.Pointers for
+// calls into assembly functions.
+const unsafeUintptrTag = "unsafe-uintptr"
+
+// This special tag is applied to uintptr parameters of functions
+// marked go:uintptrescapes.
+const uintptrEscapesTag = "uintptr-escapes"
+
+func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
+ name := func() string {
+ if f.Sym != nil {
+ return f.Sym.Name
+ }
+ return fmt.Sprintf("arg#%d", narg)
+ }
+
+ if fn.Nbody.Len() == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
+ if f.Type.IsUintptr() {
+ if Debug.m != 0 {
+ Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return unsafeUintptrTag
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ var esc EscLeaks
+
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Func.Pragma&Noescape != 0 {
+ if Debug.m != 0 && f.Sym != nil {
+ Warnl(f.Pos, "%v does not escape", name())
+ }
+ } else {
+ if Debug.m != 0 && f.Sym != nil {
+ Warnl(f.Pos, "leaking param: %v", name())
+ }
+ esc.AddHeap(0)
+ }
+
+ return esc.Encode()
+ }
+
+ if fn.Func.Pragma&UintptrEscapes != 0 {
+ if f.Type.IsUintptr() {
+ if Debug.m != 0 {
+ Warnl(f.Pos, "marking %v as escaping uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ if f.IsDDD() && f.Type.Elem().IsUintptr() {
+ // final argument is ...uintptr.
+ if Debug.m != 0 {
+ Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
+ }
+ return uintptrEscapesTag
+ }
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ // Unnamed parameters are unused and therefore do not escape.
+ if f.Sym == nil || f.Sym.IsBlank() {
+ var esc EscLeaks
+ return esc.Encode()
+ }
+
+ n := asNode(f.Nname)
+ loc := e.oldLoc(n)
+ esc := loc.paramEsc
+ esc.Optimize()
+
+ if Debug.m != 0 && !loc.escapes {
+ if esc.Empty() {
+ Warnl(f.Pos, "%v does not escape", name())
+ }
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ Warnl(f.Pos, "leaking param: %v", name())
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ Warnl(f.Pos, "leaking param content: %v", name())
+ }
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := fn.Type.Results().Field(i).Sym
+ Warnl(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+ }
+ }
+ }
+
+ return esc.Encode()
+}
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
new file mode 100644
index 0000000..f719892
--- /dev/null
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -0,0 +1,1539 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "strings"
+)
+
+// Escape analysis.
+//
+// Here we analyze functions to determine which Go variables
+// (including implicit allocations such as calls to "new" or "make",
+// composite literals, etc.) can be allocated on the stack. The two
+// key invariants we have to ensure are: (1) pointers to stack objects
+// cannot be stored in the heap, and (2) pointers to a stack object
+// cannot outlive that object (e.g., because the declaring function
+// returned and destroyed the object's stack frame, or its space is
+// reused across loop iterations for logically distinct variables).
+//
+// We implement this with a static data-flow analysis of the AST.
+// First, we construct a directed weighted graph where vertices
+// (termed "locations") represent variables allocated by statements
+// and expressions, and edges represent assignments between variables
+// (with weights representing addressing/dereference counts).
+//
+// Next we walk the graph looking for assignment paths that might
+// violate the invariants stated above. If a variable v's address is
+// stored in the heap or elsewhere that may outlive it, then v is
+// marked as requiring heap allocation.
+//
+// To support interprocedural analysis, we also record data-flow from
+// each function's parameters to the heap and to its result
+// parameters. This information is summarized as "parameter tags",
+// which are used at static call sites to improve escape analysis of
+// function arguments.
+
+// Constructing the location graph.
+//
+// Every allocating statement (e.g., variable declaration) or
+// expression (e.g., "new" or "make") is first mapped to a unique
+// "location."
+//
+// We also model every Go assignment as a directed edge between
+// locations. The number of dereference operations minus the number of
+// addressing operations is recorded as the edge's weight (termed
+// "derefs"). For example:
+//
+// p = &q // -1
+// p = q // 0
+// p = *q // 1
+// p = **q // 2
+//
+// p = **&**&q // 2
+//
+// Note that the & operator can only be applied to addressable
+// expressions, and the expression &x itself is not addressable, so
+// derefs cannot go below -1.
+//
+// Every Go language construct is lowered into this representation,
+// generally without sensitivity to flow, path, or context; and
+// without distinguishing elements within a compound variable. For
+// example:
+//
+// var x struct { f, g *int }
+// var u []*int
+//
+// x.f = u[0]
+//
+// is modeled simply as
+//
+// x = *u
+//
+// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
+// u[2], etc. However, we do record the implicit dereference involved
+// in indexing a slice.
+
+type Escape struct {
+ allLocs []*EscLocation
+
+ curfn *Node
+
+ // loopDepth counts the current loop nesting depth within
+ // curfn. It increments within each "for" loop and at each
+ // label with a corresponding backwards "goto" (i.e.,
+ // unstructured loop).
+ loopDepth int
+
+ heapLoc EscLocation
+ blankLoc EscLocation
+}
+
+// An EscLocation represents an abstract location that stores a Go
+// variable.
+type EscLocation struct {
+ n *Node // represented variable or expression, if any
+ curfn *Node // enclosing function
+ edges []EscEdge // incoming edges
+ loopDepth int // loopDepth at declaration
+
+ // derefs and walkgen are used during walkOne to track the
+ // minimal dereferences from the walk root.
+ derefs int // >= -1
+ walkgen uint32
+
+ // dst and dstEdgeIdx track the next immediate assignment
+ // destination location during walkOne, along with the index
+ // of the edge pointing back to this location.
+ dst *EscLocation
+ dstEdgeIdx int
+
+ // queued is used by walkAll to track whether this location is
+ // in the walk queue.
+ queued bool
+
+ // escapes reports whether the represented variable's address
+ // escapes; that is, whether the variable must be heap
+ // allocated.
+ escapes bool
+
+ // transient reports whether the represented expression's
+ // address does not outlive the statement; that is, whether
+ // its storage can be immediately reused.
+ transient bool
+
+ // paramEsc records the represented parameter's leak set.
+ paramEsc EscLeaks
+}
+
+// An EscEdge represents an assignment edge between two Go variables.
+type EscEdge struct {
+ src *EscLocation
+ derefs int // >= -1
+ notes *EscNote
+}
+
+// escapeFuncs performs escape analysis on a minimal batch of
+// functions.
+func escapeFuncs(fns []*Node, recursive bool) {
+ for _, fn := range fns {
+ if fn.Op != ODCLFUNC {
+ Fatalf("unexpected node: %v", fn)
+ }
+ }
+
+ var e Escape
+ e.heapLoc.escapes = true
+
+ // Construct data-flow graph from syntax trees.
+ for _, fn := range fns {
+ e.initFunc(fn)
+ }
+ for _, fn := range fns {
+ e.walkFunc(fn)
+ }
+ e.curfn = nil
+
+ e.walkAll()
+ e.finish(fns)
+}
+
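+// initFunc prepares fn for analysis: it marks fn as planned and
+// allocates an abstract location for each of fn's local variables.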
+func (e *Escape) initFunc(fn *Node) {
+ if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
+ Fatalf("unexpected node: %v", fn)
+ }
+ fn.Esc = EscFuncPlanned
+ if Debug.m > 3 {
+ Dump("escAnalyze", fn)
+ }
+
+ e.curfn = fn
+ e.loopDepth = 1
+
+ // Allocate locations for local variables.
+ for _, dcl := range fn.Func.Dcl {
+ if dcl.Op == ONAME {
+ e.newLoc(dcl, false)
+ }
+ }
+}
+
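+// walkFunc analyzes fn's body, identifying looping labels and
+// recording the data-flow edges implied by its statements.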
+func (e *Escape) walkFunc(fn *Node) {
+ fn.Esc = EscFuncStarted
+
+ // Identify labels that mark the head of an unstructured loop.
+ inspectList(fn.Nbody, func(n *Node) bool {
+ switch n.Op {
+ case OLABEL:
+ n.Sym.Label = asTypesNode(&nonlooping)
+
+ case OGOTO:
+ // If we visited the label before the goto,
+ // then this is a looping label.
+ if n.Sym.Label == asTypesNode(&nonlooping) {
+ n.Sym.Label = asTypesNode(&looping)
+ }
+ }
+
+ return true
+ })
+
+ e.curfn = fn
+ e.loopDepth = 1
+ e.block(fn.Nbody)
+}
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+// if x {
+// e.discard(n.Left)
+// } else {
+// e.value(k, n.Left)
+// }
+//
+// or
+//
+// if x {
+// k = e.discardHole()
+// }
+// e.value(k, n.Left)
+//
+// Do NOT write:
+//
+// // BAD: possibly loses side-effects within n.Left
+// if !x {
+// e.value(k, n.Left)
+// }
+
+// stmt evaluates a single Go statement.
+func (e *Escape) stmt(n *Node) {
+ if n == nil {
+ return
+ }
+
+ lno := setlineno(n)
+ defer func() {
+ lineno = lno
+ }()
+
+ if Debug.m > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
+ }
+
+ e.stmts(n.Ninit)
+
+ switch n.Op {
+ default:
+ Fatalf("unexpected stmt: %v", n)
+
+ case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
+ // nop
+
+ case OBREAK, OCONTINUE, OGOTO:
+ // TODO(mdempsky): Handle dead code?
+
+ case OBLOCK:
+ e.stmts(n.List)
+
+ case ODCL:
+ // Record loop depth at declaration.
+ if !n.Left.isBlank() {
+ e.dcl(n.Left)
+ }
+
+ case OLABEL:
+ switch asNode(n.Sym.Label) {
+ case &nonlooping:
+ if Debug.m > 2 {
+ fmt.Printf("%v: %v non-looping label\n", linestr(lineno), n)
+ }
+ case &looping:
+ if Debug.m > 2 {
+ fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
+ }
+ e.loopDepth++
+ default:
+ Fatalf("label missing tag")
+ }
+ n.Sym.Label = nil
+
+ case OIF:
+ e.discard(n.Left)
+ e.block(n.Nbody)
+ e.block(n.Rlist)
+
+ case OFOR, OFORUNTIL:
+ e.loopDepth++
+ e.discard(n.Left)
+ e.stmt(n.Right)
+ e.block(n.Nbody)
+ e.loopDepth--
+
+ case ORANGE:
+ // for List = range Right { Nbody }
+ e.loopDepth++
+ ks := e.addrs(n.List)
+ e.block(n.Nbody)
+ e.loopDepth--
+
+ // Right is evaluated outside the loop.
+ k := e.discardHole()
+ if len(ks) >= 2 {
+ if n.Right.Type.IsArray() {
+ k = ks[1].note(n, "range")
+ } else {
+ k = ks[1].deref(n, "range-deref")
+ }
+ }
+ e.expr(e.later(k), n.Right)
+
+ case OSWITCH:
+ typesw := n.Left != nil && n.Left.Op == OTYPESW
+
+ var ks []EscHole
+ for _, cas := range n.List.Slice() { // cases
+ if typesw && n.Left.Left != nil {
+ cv := cas.Rlist.First()
+ k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type.HasPointers() {
+ ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
+ }
+ }
+
+ e.discards(cas.List)
+ e.block(cas.Nbody)
+ }
+
+ if typesw {
+ e.expr(e.teeHole(ks...), n.Left.Right)
+ } else {
+ e.discard(n.Left)
+ }
+
+ case OSELECT:
+ for _, cas := range n.List.Slice() {
+ e.stmt(cas.Left)
+ e.block(cas.Nbody)
+ }
+ case OSELRECV:
+ e.assign(n.Left, n.Right, "selrecv", n)
+ case OSELRECV2:
+ e.assign(n.Left, n.Right, "selrecv", n)
+ e.assign(n.List.First(), nil, "selrecv", n)
+ case ORECV:
+ // TODO(mdempsky): Consider e.discard(n.Left).
+ e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+ case OSEND:
+ e.discard(n.Left)
+ e.assignHeap(n.Right, "send", n)
+
+ case OAS, OASOP:
+ e.assign(n.Left, n.Right, "assign", n)
+
+ case OAS2:
+ for i, nl := range n.List.Slice() {
+ e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
+ }
+
+ case OAS2DOTTYPE: // v, ok = x.(type)
+ e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
+ e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
+ case OAS2MAPR: // v, ok = m[k]
+ e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
+ e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
+ case OAS2RECV: // v, ok = <-ch
+ e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
+ e.assign(n.List.Second(), nil, "assign-pair-receive", n)
+
+ case OAS2FUNC:
+ e.stmts(n.Right.Ninit)
+ e.call(e.addrs(n.List), n.Right, nil)
+ case ORETURN:
+ results := e.curfn.Type.Results().FieldSlice()
+ for i, v := range n.List.Slice() {
+ e.assign(asNode(results[i].Nname), v, "return", n)
+ }
+ case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
+ e.call(nil, n, nil)
+ case OGO, ODEFER:
+ e.stmts(n.Left.Ninit)
+ e.call(nil, n.Left, n)
+
+ case ORETJMP:
+ // TODO(mdempsky): What do? esc.go just ignores it.
+ }
+}
+
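+// stmts evaluates a list of statements in order.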
+func (e *Escape) stmts(l Nodes) {
+ for _, n := range l.Slice() {
+ e.stmt(n)
+ }
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *Escape) block(l Nodes) {
+ old := e.loopDepth
+ e.stmts(l)
+ e.loopDepth = old
+}
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *Escape) expr(k EscHole, n *Node) {
+ if n == nil {
+ return
+ }
+ e.stmts(n.Ninit)
+ e.exprSkipInit(k, n)
+}
+
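+// exprSkipInit is like expr, but assumes that n's init statements
+// (Ninit) have already been evaluated by the caller.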
+func (e *Escape) exprSkipInit(k EscHole, n *Node) {
+ if n == nil {
+ return
+ }
+
+ lno := setlineno(n)
+ defer func() {
+ lineno = lno
+ }()
+
+ uintptrEscapesHack := k.uintptrEscapesHack
+ k.uintptrEscapesHack = false
+
+ if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
+ // nop
+ } else if k.derefs >= 0 && !n.Type.HasPointers() {
+ k = e.discardHole()
+ }
+
+ switch n.Op {
+ default:
+ Fatalf("unexpected expr: %v", n)
+
+ case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
+ // nop
+
+ case ONAME:
+ if n.Class() == PFUNC || n.Class() == PEXTERN {
+ return
+ }
+ e.flow(k, e.oldLoc(n))
+
+ case OPLUS, ONEG, OBITNOT, ONOT:
+ e.discard(n.Left)
+ case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
+ e.discard(n.Left)
+ e.discard(n.Right)
+
+ case OADDR:
+ e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
+ case ODEREF:
+ e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
+ case ODOT, ODOTMETH, ODOTINTER:
+ e.expr(k.note(n, "dot"), n.Left)
+ case ODOTPTR:
+ e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
+ case ODOTTYPE, ODOTTYPE2:
+ e.expr(k.dotType(n.Type, n, "dot"), n.Left)
+ case OINDEX:
+ if n.Left.Type.IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.Left)
+ } else {
+ // TODO(mdempsky): Fix why reason text.
+ e.expr(k.deref(n, "dot of pointer"), n.Left)
+ }
+ e.discard(n.Right)
+ case OINDEXMAP:
+ e.discard(n.Left)
+ e.discard(n.Right)
+ case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
+ e.expr(k.note(n, "slice"), n.Left)
+ low, high, max := n.SliceBounds()
+ e.discard(low)
+ e.discard(high)
+ e.discard(max)
+
+ case OCONV, OCONVNOP:
+ if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
+ // When -d=checkptr=2 is enabled, treat
+ // conversions to unsafe.Pointer as an
+ // escaping operation. This allows better
+ // runtime instrumentation, since we can more
+ // easily detect object boundaries on the heap
+ // than the stack.
+ e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
+ } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
+ e.unsafeValue(k, n.Left)
+ } else {
+ e.expr(k, n.Left)
+ }
+ case OCONVIFACE:
+ if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
+ k = e.spill(k, n)
+ }
+ e.expr(k.note(n, "interface-converted"), n.Left)
+
+ case ORECV:
+ e.discard(n.Left)
+
+ case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
+ e.call([]EscHole{k}, n, nil)
+
+ case ONEW:
+ e.spill(k, n)
+
+ case OMAKESLICE:
+ e.spill(k, n)
+ e.discard(n.Left)
+ e.discard(n.Right)
+ case OMAKECHAN:
+ e.discard(n.Left)
+ case OMAKEMAP:
+ e.spill(k, n)
+ e.discard(n.Left)
+
+ case ORECOVER:
+ // nop
+
+ case OCALLPART:
+ // Flow the receiver argument to both the closure and
+ // to the receiver parameter.
+
+ closureK := e.spill(k, n)
+
+ m := callpartMethod(n)
+
+ // We don't know how the method value will be called
+ // later, so conservatively assume the result
+ // parameters all flow to the heap.
+ //
+ // TODO(mdempsky): Change ks into a callback, so that
+ // we don't have to create this dummy slice?
+ var ks []EscHole
+ for i := m.Type.NumResults(); i > 0; i-- {
+ ks = append(ks, e.heapHole())
+ }
+ paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
+
+ e.expr(e.teeHole(paramK, closureK), n.Left)
+
+ case OPTRLIT:
+ e.expr(e.spill(k, n), n.Left)
+
+ case OARRAYLIT:
+ for _, elt := range n.List.Slice() {
+ if elt.Op == OKEY {
+ elt = elt.Right
+ }
+ e.expr(k.note(n, "array literal element"), elt)
+ }
+
+ case OSLICELIT:
+ k = e.spill(k, n)
+ k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
+
+ for _, elt := range n.List.Slice() {
+ if elt.Op == OKEY {
+ elt = elt.Right
+ }
+ e.expr(k.note(n, "slice-literal-element"), elt)
+ }
+
+ case OSTRUCTLIT:
+ for _, elt := range n.List.Slice() {
+ e.expr(k.note(n, "struct literal element"), elt.Left)
+ }
+
+ case OMAPLIT:
+ e.spill(k, n)
+
+ // Map keys and values are always stored in the heap.
+ for _, elt := range n.List.Slice() {
+ e.assignHeap(elt.Left, "map literal key", n)
+ e.assignHeap(elt.Right, "map literal value", n)
+ }
+
+ case OCLOSURE:
+ k = e.spill(k, n)
+
+ // Link addresses of captured variables to closure.
+ for _, v := range n.Func.Closure.Func.Cvars.Slice() {
+ if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
+ continue
+ }
+
+ k := k
+ if !v.Name.Byval() {
+ k = k.addr(v, "reference")
+ }
+
+ e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
+ }
+
+ case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
+ e.spill(k, n)
+ e.discard(n.Left)
+
+ case OADDSTR:
+ e.spill(k, n)
+
+ // Arguments of OADDSTR never escape;
+ // runtime.concatstrings makes sure of that.
+ e.discards(n.List)
+ }
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *Escape) unsafeValue(k EscHole, n *Node) {
+ if n.Type.Etype != TUINTPTR {
+ Fatalf("unexpected type %v for %v", n.Type, n)
+ }
+
+ e.stmts(n.Ninit)
+
+ switch n.Op {
+ case OCONV, OCONVNOP:
+ if n.Left.Type.IsUnsafePtr() {
+ e.expr(k, n.Left)
+ } else {
+ e.discard(n.Left)
+ }
+ case ODOTPTR:
+ if isReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
+ } else {
+ e.discard(n.Left)
+ }
+ case OPLUS, ONEG, OBITNOT:
+ e.unsafeValue(k, n.Left)
+ case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
+ e.unsafeValue(k, n.Left)
+ e.unsafeValue(k, n.Right)
+ case OLSH, ORSH:
+ e.unsafeValue(k, n.Left)
+ // RHS need not be uintptr-typed (#32959) and can't meaningfully
+ // flow pointers anyway.
+ e.discard(n.Right)
+ default:
+ e.exprSkipInit(e.discardHole(), n)
+ }
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *Escape) discard(n *Node) {
+ e.expr(e.discardHole(), n)
+}
+
+func (e *Escape) discards(l Nodes) {
+ for _, n := range l.Slice() {
+ e.discard(n)
+ }
+}
+
+// addr evaluates an addressable expression n and returns an EscHole
+// that represents storing into the represented location.
+func (e *Escape) addr(n *Node) EscHole {
+ if n == nil || n.isBlank() {
+ // Can happen at least in OSELRECV.
+ // TODO(mdempsky): Anywhere else?
+ return e.discardHole()
+ }
+
+ k := e.heapHole()
+
+ switch n.Op {
+ default:
+ Fatalf("unexpected addr: %v", n)
+ case ONAME:
+ if n.Class() == PEXTERN {
+ break
+ }
+ k = e.oldLoc(n).asHole()
+ case ODOT:
+ k = e.addr(n.Left)
+ case OINDEX:
+ e.discard(n.Right)
+ if n.Left.Type.IsArray() {
+ k = e.addr(n.Left)
+ } else {
+ e.discard(n.Left)
+ }
+ case ODEREF, ODOTPTR:
+ e.discard(n)
+ case OINDEXMAP:
+ e.discard(n.Left)
+ e.assignHeap(n.Right, "key of map put", n)
+ }
+
+ if !n.Type.HasPointers() {
+ k = e.discardHole()
+ }
+
+ return k
+}
+
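+// addrs evaluates a list of addressable expressions and returns the
+// corresponding holes.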
+func (e *Escape) addrs(l Nodes) []EscHole {
+ var ks []EscHole
+ for _, n := range l.Slice() {
+ ks = append(ks, e.addr(n))
+ }
+ return ks
+}
+
+// assign evaluates the assignment dst = src.
+func (e *Escape) assign(dst, src *Node, why string, where *Node) {
+ // Filter out some no-op assignments for escape analysis.
+ ignore := dst != nil && src != nil && isSelfAssign(dst, src)
+ if ignore && Debug.m != 0 {
+ Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
+ }
+
+ k := e.addr(dst)
+ if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
+ e.unsafeValue(e.heapHole().note(where, why), src)
+ } else {
+ if ignore {
+ k = e.discardHole()
+ }
+ e.expr(k.note(where, why), src)
+ }
+}
+
+func (e *Escape) assignHeap(src *Node, why string, where *Node) {
+ e.expr(e.heapHole().note(where, why), src)
+}
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow; where is the OGO/ODEFER context of the call, if any.
+func (e *Escape) call(ks []EscHole, call, where *Node) {
+ topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
+ if topLevelDefer {
+ // force stack allocation of defer record, unless
+ // open-coded defers are used (see ssa.go)
+ where.Esc = EscNever
+ }
+
+ argument := func(k EscHole, arg *Node) {
+ if topLevelDefer {
+ // Top-level defer arguments don't escape to the
+ // heap, but they do need to last until the end of
+ // the function.
+ k = e.later(k)
+ } else if where != nil {
+ k = e.heapHole()
+ }
+
+ e.expr(k.note(call, "call parameter"), arg)
+ }
+
+ switch call.Op {
+ default:
+ Fatalf("unexpected call op: %v", call.Op)
+
+ case OCALLFUNC, OCALLMETH, OCALLINTER:
+ fixVariadicCall(call)
+
+ // Pick out the function callee, if statically known.
+ var fn *Node
+ switch call.Op {
+ case OCALLFUNC:
+ switch v := staticValue(call.Left); {
+ case v.Op == ONAME && v.Class() == PFUNC:
+ fn = v
+ case v.Op == OCLOSURE:
+ fn = v.Func.Closure.Func.Nname
+ }
+ case OCALLMETH:
+ fn = asNode(call.Left.Type.FuncType().Nname)
+ }
+
+ fntype := call.Left.Type
+ if fn != nil {
+ fntype = fn.Type
+ }
+
+ if ks != nil && fn != nil && e.inMutualBatch(fn) {
+ for i, result := range fn.Type.Results().FieldSlice() {
+ e.expr(ks[i], asNode(result.Nname))
+ }
+ }
+
+ if r := fntype.Recv(); r != nil {
+ argument(e.tagHole(ks, fn, r), call.Left.Left)
+ } else {
+ // Evaluate callee function expression.
+ argument(e.discardHole(), call.Left)
+ }
+
+ args := call.List.Slice()
+ for i, param := range fntype.Params().FieldSlice() {
+ argument(e.tagHole(ks, fn, param), args[i])
+ }
+
+ case OAPPEND:
+ args := call.List.Slice()
+
+ // Appendee slice may flow directly to the result, if
+ // it has enough capacity. Alternatively, a new heap
+ // slice might be allocated, and all slice elements
+ // might flow to heap.
+ appendeeK := ks[0]
+ if args[0].Type.Elem().HasPointers() {
+ appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+ }
+ argument(appendeeK, args[0])
+
+ if call.IsDDD() {
+ appendedK := e.discardHole()
+ if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
+ appendedK = e.heapHole().deref(call, "appended slice...")
+ }
+ argument(appendedK, args[1])
+ } else {
+ for _, arg := range args[1:] {
+ argument(e.heapHole(), arg)
+ }
+ }
+
+ case OCOPY:
+ argument(e.discardHole(), call.Left)
+
+ copiedK := e.discardHole()
+ if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
+ copiedK = e.heapHole().deref(call, "copied slice")
+ }
+ argument(copiedK, call.Right)
+
+ case OPANIC:
+ argument(e.heapHole(), call.Left)
+
+ case OCOMPLEX:
+ argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Right)
+ case ODELETE, OPRINT, OPRINTN, ORECOVER:
+ for _, arg := range call.List.Slice() {
+ argument(e.discardHole(), arg)
+ }
+ case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
+ argument(e.discardHole(), call.Left)
+ }
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
+func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
+ // If this is a dynamic call, we can't rely on param.Note.
+ if fn == nil {
+ return e.heapHole()
+ }
+
+ if e.inMutualBatch(fn) {
+ return e.addr(asNode(param.Nname))
+ }
+
+ // Call to previously tagged function.
+
+ if param.Note == uintptrEscapesTag {
+ k := e.heapHole()
+ k.uintptrEscapesHack = true
+ return k
+ }
+
+ var tagKs []EscHole
+
+ esc := ParseLeaks(param.Note)
+ if x := esc.Heap(); x >= 0 {
+ tagKs = append(tagKs, e.heapHole().shift(x))
+ }
+
+ if ks != nil {
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ tagKs = append(tagKs, ks[i].shift(x))
+ }
+ }
+ }
+
+ return e.teeHole(tagKs...)
+}
+
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *Escape) inMutualBatch(fn *Node) bool {
+ if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
+ if fn.Name.Defn.Esc == EscFuncUnknown {
+ Fatalf("graph inconsistency")
+ }
+ return true
+ }
+ return false
+}
+
+// An EscHole represents a context for evaluating a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
+type EscHole struct {
+ dst *EscLocation
+ derefs int // >= -1
+ notes *EscNote
+
+ // uintptrEscapesHack indicates this context is evaluating an
+ // argument for a //go:uintptrescapes function.
+ uintptrEscapesHack bool
+}
+
+type EscNote struct {
+ next *EscNote
+ where *Node
+ why string
+}
+
+func (k EscHole) note(where *Node, why string) EscHole {
+ if where == nil || why == "" {
+ Fatalf("note: missing where/why")
+ }
+ if Debug.m >= 2 || logopt.Enabled() {
+ k.notes = &EscNote{
+ next: k.notes,
+ where: where,
+ why: why,
+ }
+ }
+ return k
+}
+
+func (k EscHole) shift(delta int) EscHole {
+ k.derefs += delta
+ if k.derefs < -1 {
+ Fatalf("derefs underflow: %v", k.derefs)
+ }
+ return k
+}
+
+func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
+func (k EscHole) addr(where *Node, why string) EscHole { return k.shift(-1).note(where, why) }
+
+func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
+ if !t.IsInterface() && !isdirectiface(t) {
+ k = k.shift(1)
+ }
+ return k.note(where, why)
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *Escape) teeHole(ks ...EscHole) EscHole {
+ if len(ks) == 0 {
+ return e.discardHole()
+ }
+ if len(ks) == 1 {
+ return ks[0]
+ }
+ // TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+ // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+ // new temporary location ltmp, wire it into place, and return
+ // a hole for "ltmp = _".
+ loc := e.newLoc(nil, true)
+ for _, k := range ks {
+ // N.B., "p = &q" and "p = &tmp; tmp = q" are not
+ // semantically equivalent. To combine holes like "l1
+ // = _" and "l2 = &_", we'd need to wire them as "l1 =
+ // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+ // instead.
+ if k.derefs < 0 {
+ Fatalf("teeHole: negative derefs")
+ }
+
+ e.flow(k, loc)
+ }
+ return loc.asHole()
+}
+
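+// dcl records that n is declared at the current loop depth and
+// returns a hole for assigning to it.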
+func (e *Escape) dcl(n *Node) EscHole {
+ loc := e.oldLoc(n)
+ loc.loopDepth = e.loopDepth
+ return loc.asHole()
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *Escape) spill(k EscHole, n *Node) EscHole {
+ loc := e.newLoc(n, true)
+ e.flow(k.addr(n, "spill"), loc)
+ return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *Escape) later(k EscHole) EscHole {
+ loc := e.newLoc(nil, false)
+ e.flow(k, loc)
+ return loc.asHole()
+}
+
+// canonicalNode returns the canonical *Node that n logically
+// represents.
+func canonicalNode(n *Node) *Node {
+ if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
+ n = n.Name.Defn
+ if n.Name.IsClosureVar() {
+ Fatalf("still closure var")
+ }
+ }
+
+ return n
+}
+
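+// newLoc allocates a new abstract location for n (which may be nil
+// for temporaries) within the current function, recording whether
+// its storage is transient.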
+func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
+ if e.curfn == nil {
+ Fatalf("e.curfn isn't set")
+ }
+ if n != nil && n.Type != nil && n.Type.NotInHeap() {
+ yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ }
+
+ n = canonicalNode(n)
+ loc := &EscLocation{
+ n: n,
+ curfn: e.curfn,
+ loopDepth: e.loopDepth,
+ transient: transient,
+ }
+ e.allLocs = append(e.allLocs, loc)
+ if n != nil {
+ if n.Op == ONAME && n.Name.Curfn != e.curfn {
+ Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
+ }
+
+ if n.HasOpt() {
+ Fatalf("%v already has a location", n)
+ }
+ n.SetOpt(loc)
+
+ if why := heapAllocReason(n); why != "" {
+ e.flow(e.heapHole().addr(n, why), loc)
+ }
+ }
+ return loc
+}
+
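+// oldLoc returns the location previously allocated for n by newLoc.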
+func (e *Escape) oldLoc(n *Node) *EscLocation {
+ n = canonicalNode(n)
+ return n.Opt().(*EscLocation)
+}
+
+func (l *EscLocation) asHole() EscHole {
+ return EscHole{dst: l}
+}
+
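+// flow records that data flows from src into the hole k: it either
+// adds an edge to k's destination or, when src's address flows to an
+// already-escaping location, marks src as escaping immediately.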
+func (e *Escape) flow(k EscHole, src *EscLocation) {
+ dst := k.dst
+ if dst == &e.blankLoc {
+ return
+ }
+ if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+ return
+ }
+ if dst.escapes && k.derefs < 0 { // dst = &src
+ if Debug.m >= 2 || logopt.Enabled() {
+ pos := linestr(src.n.Pos)
+ if Debug.m >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+ }
+ explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+ if logopt.Enabled() {
+ logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ }
+
+ }
+ src.escapes = true
+ return
+ }
+
+ // TODO(mdempsky): Deduplicate edges?
+ dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
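+// heapHole and discardHole return holes that flow into the heap and
+// blank locations, respectively.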
+func (e *Escape) heapHole() EscHole { return e.heapLoc.asHole() }
+func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (e *Escape) walkAll() {
+ // We use a work queue to keep track of locations that we need
+ // to visit, and repeatedly walk until we reach a fixed point.
+ //
+ // We walk once from each location (including the heap), and
+ // then re-enqueue each location on its transition from
+ // transient->!transient and !escapes->escapes, which can each
+ // happen at most once. So we take Θ(len(e.allLocs)) walks.
+
+ // LIFO queue, has enough room for e.allLocs and e.heapLoc.
+ todo := make([]*EscLocation, 0, len(e.allLocs)+1)
+ enqueue := func(loc *EscLocation) {
+ if !loc.queued {
+ todo = append(todo, loc)
+ loc.queued = true
+ }
+ }
+
+ for _, loc := range e.allLocs {
+ enqueue(loc)
+ }
+ enqueue(&e.heapLoc)
+
+ var walkgen uint32
+ for len(todo) > 0 {
+ root := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+ root.queued = false
+
+ walkgen++
+ e.walkOne(root, walkgen, enqueue)
+ }
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
+ // The data flow graph has negative edges (from addressing
+ // operations), so we use the Bellman-Ford algorithm. However,
+ // we don't have to worry about infinite negative cycles since
+ // we bound intermediate dereference counts to 0.
+
+ root.walkgen = walkgen
+ root.derefs = 0
+ root.dst = nil
+
+ todo := []*EscLocation{root} // LIFO queue
+ for len(todo) > 0 {
+ l := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ base := l.derefs
+
+ // If l.derefs < 0, then l's address flows to root.
+ addressOf := base < 0
+ if addressOf {
+ // For a flow path like "root = &l; l = x",
+ // l's address flows to root, but x's does
+ // not. We recognize this by lower bounding
+ // base at 0.
+ base = 0
+
+ // If l's address flows to a non-transient
+ // location, then l can't be transiently
+ // allocated.
+ if !root.transient && l.transient {
+ l.transient = false
+ enqueue(l)
+ }
+ }
+
+ if e.outlives(root, l) {
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(PPARAM) {
+ if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
+ if Debug.m >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
+ }
+ explanation := e.explainPath(root, l)
+ if logopt.Enabled() {
+ logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
+ }
+ }
+ l.leakTo(root, base)
+ }
+
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
+ // allocated.
+ if addressOf && !l.escapes {
+ if logopt.Enabled() || Debug.m >= 2 {
+ if Debug.m >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
+ }
+ explanation := e.explainPath(root, l)
+ if logopt.Enabled() {
+ logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ }
+ }
+ l.escapes = true
+ enqueue(l)
+ continue
+ }
+ }
+
+ for i, edge := range l.edges {
+ if edge.src.escapes {
+ continue
+ }
+ derefs := base + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
+ edge.src.walkgen = walkgen
+ edge.src.derefs = derefs
+ edge.src.dst = l
+ edge.src.dstEdgeIdx = i
+ todo = append(todo, edge.src)
+ }
+ }
+ }
+}
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
+ visited := make(map[*EscLocation]bool)
+ pos := linestr(src.n.Pos)
+ var explanation []*logopt.LoggedOpt
+ for {
+ // Prevent infinite loop.
+ if visited[src] {
+ if Debug.m >= 2 {
+ fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+ }
+ break
+ }
+ visited[src] = true
+ dst := src.dst
+ edge := &dst.edges[src.dstEdgeIdx]
+ if edge.src != src {
+ Fatalf("path inconsistency: %v != %v", edge.src, src)
+ }
+
+ explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+ if dst == root {
+ break
+ }
+ src = dst
+ }
+
+ return explanation
+}
+
+func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+ ops := "&"
+ if derefs >= 0 {
+ ops = strings.Repeat("*", derefs)
+ }
+ print := Debug.m >= 2
+
+ flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
+ if print {
+ fmt.Printf("%s:%s\n", pos, flow)
+ }
+ if logopt.Enabled() {
+ var epos src.XPos
+ if notes != nil {
+ epos = notes.where.Pos
+ } else if srcloc != nil && srcloc.n != nil {
+ epos = srcloc.n.Pos
+ }
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
+ }
+
+ for note := notes; note != nil; note = note.next {
+ if print {
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
+ }
+ if logopt.Enabled() {
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
+ fmt.Sprintf(" from %v (%v)", note.where, note.why)))
+ }
+ }
+ return explanation
+}
+
+func (e *Escape) explainLoc(l *EscLocation) string {
+ if l == &e.heapLoc {
+ return "{heap}"
+ }
+ if l.n == nil {
+ // TODO(mdempsky): Omit entirely.
+ return "{temp}"
+ }
+ if l.n.Op == ONAME {
+ return fmt.Sprintf("%v", l.n)
+ }
+ return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (e *Escape) outlives(l, other *EscLocation) bool {
+ // The heap outlives everything.
+ if l.escapes {
+ return true
+ }
+
+ // We don't know what callers do with returned values, so
+ // pessimistically we need to assume they flow to the heap and
+ // outlive everything too.
+ if l.isName(PPARAMOUT) {
+ // Exception: Directly called closures can return
+ // locations allocated outside of them without forcing
+ // them to the heap. For example:
+ //
+ // var u int // okay to stack allocate
+ // *(func() *int { return &u }()) = 42
+ if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
+ return false
+ }
+
+ return true
+ }
+
+ // If l and other are within the same function, then l
+ // outlives other if it was declared outside other's loop
+ // scope. For example:
+ //
+ // var l *int
+ // for {
+ // l = new(int)
+ // }
+ if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+ return true
+ }
+
+ // If other is declared within a child closure of where l is
+ // declared, then l outlives it. For example:
+ //
+ // var l *int
+ // func() {
+ // l = new(int)
+ // }
+ if containsClosure(l.curfn, other.curfn) {
+ return true
+ }
+
+ return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *Node) bool {
+ if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
+ Fatalf("bad containsClosure: %v, %v", f, c)
+ }
+
+ // Common case.
+ if f == c {
+ return false
+ }
+
+ // Closures within function Foo are named like "Foo.funcN..."
+ // TODO(mdempsky): Better way to recognize this.
+ fn := f.Func.Nname.Sym.Name
+ cn := c.Func.Nname.Sym.Name
+ return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
+
+// leakTo records that parameter l leaks to sink.
+func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.escapes && sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
+ // TODO(mdempsky): Eliminate dependency on Vargen here.
+ ri := int(sink.n.Name.Vargen) - 1
+ if ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+func (e *Escape) finish(fns []*Node) {
+ // Record parameter tags for package export data.
+ for _, fn := range fns {
+ fn.Esc = EscFuncTagged
+
+ narg := 0
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(fn.Type).Fields().Slice() {
+ narg++
+ f.Note = e.paramTag(fn, narg, f)
+ }
+ }
+ }
+
+ for _, loc := range e.allLocs {
+ n := loc.n
+ if n == nil {
+ continue
+ }
+ n.SetOpt(nil)
+
+ // Update n.Esc based on escape analysis results.
+
+ if loc.escapes {
+ if n.Op != ONAME {
+ if Debug.m != 0 {
+ Warnl(n.Pos, "%S escapes to heap", n)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
+ }
+ }
+ n.Esc = EscHeap
+ addrescapes(n)
+ } else {
+ if Debug.m != 0 && n.Op != ONAME {
+ Warnl(n.Pos, "%S does not escape", n)
+ }
+ n.Esc = EscNone
+ if loc.transient {
+ n.SetTransient(true)
+ }
+ }
+ }
+}
+
+func (l *EscLocation) isName(c Class) bool {
+ return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
+}
+
+const numEscResults = 7
+
+// An EscLeaks represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
+type EscLeaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l EscLeaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
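+// get and set bias stored values by one so that the zero value of
+// EscLeaks means "no flows recorded": a stored byte of 0 decodes to
+// -1 (no flow), and a direct value flow (derefs == 0) is stored as 1.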
+func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *EscLeaks) add(i, derefs int) {
+ if old := l.get(i); old < 0 || derefs < old {
+ l.set(i, derefs)
+ }
+}
+
+func (l *EscLeaks) set(i, derefs int) {
+ v := derefs + 1
+ if v < 0 {
+ Fatalf("invalid derefs count: %v", derefs)
+ }
+ if v > math.MaxUint8 {
+ v = math.MaxUint8
+ }
+
+ l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *EscLeaks) Optimize() {
+ // If we have a path to the heap, then there's no use in
+ // keeping equal or longer paths elsewhere.
+ if x := l.Heap(); x >= 0 {
+ for i := 0; i < numEscResults; i++ {
+ if l.Result(i) >= x {
+ l.setResult(i, -1)
+ }
+ }
+ }
+}
+
+var leakTagCache = map[EscLeaks]string{}
+
+// Encode converts l into a binary string for export data.
+func (l EscLeaks) Encode() string {
+ if l.Heap() == 0 {
+ // Space optimization: empty string encodes more
+ // efficiently in export data.
+ return ""
+ }
+ if s, ok := leakTagCache[l]; ok {
+ return s
+ }
+
+ n := len(l)
+ for n > 0 && l[n-1] == 0 {
+ n--
+ }
+ s := "esc:" + string(l[:n])
+ leakTagCache[l] = s
+ return s
+}
+
+// ParseLeaks parses a binary string representing an EscLeaks.
+func ParseLeaks(s string) EscLeaks {
+ var l EscLeaks
+ if !strings.HasPrefix(s, "esc:") {
+ l.AddHeap(0)
+ return l
+ }
+ copy(l[:], s[4:])
+ return l
+}
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
new file mode 100644
index 0000000..c6917e0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/export.go
@@ -0,0 +1,233 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+ "cmd/internal/src"
+ "fmt"
+)
+
+var (
+ Debug_export int // if set, print debugging information about export data
+)
+
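+// exportf writes formatted export data to bout and, when export
+// debugging is enabled, echoes it to standard output.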
+func exportf(bout *bio.Writer, format string, args ...interface{}) {
+ fmt.Fprintf(bout, format, args...)
+ if Debug_export != 0 {
+ fmt.Printf(format, args...)
+ }
+}
+
+var asmlist []*Node
+
+// exportsym marks n for export (or reexport).
+func exportsym(n *Node) {
+ if n.Sym.OnExportList() {
+ return
+ }
+ n.Sym.SetOnExportList(true)
+
+ if Debug.E != 0 {
+ fmt.Printf("export symbol %v\n", n.Sym)
+ }
+
+ exportlist = append(exportlist, n)
+}
+
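+// initname reports whether s is the name of a package init function.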
+func initname(s string) bool {
+ return s == "init"
+}
+
+func autoexport(n *Node, ctxt Class) {
+ if n.Sym.Pkg != localpkg {
+ return
+ }
+ if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+ return
+ }
+ if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() {
+ return
+ }
+
+ if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
+ exportsym(n)
+ }
+ if asmhdr != "" && !n.Sym.Asm() {
+ n.Sym.SetAsm(true)
+ asmlist = append(asmlist, n)
+ }
+}
+
+func dumpexport(bout *bio.Writer) {
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ exportf(bout, "\n$$B\n") // indicate binary export format
+ off := bout.Offset()
+ iexport(bout.Writer)
+ size := bout.Offset() - off
+ exportf(bout, "\n$$\n")
+
+ if Debug_export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", myimportpath, size)
+ }
+}
+
+func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
+ n := asNode(s.PkgDef())
+ if n == nil {
+ // iimport should have created a stub ONONAME
+ // declaration for all imported symbols. The exception
+ // is declarations for Runtimepkg, which are populated
+ // by loadsys instead.
+ if s.Pkg != Runtimepkg {
+ Fatalf("missing ONONAME for %v\n", s)
+ }
+
+ n = dclname(s)
+ s.SetPkgDef(asTypesNode(n))
+ s.Importdef = ipkg
+ }
+ if n.Op != ONONAME && n.Op != op {
+ redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
+ }
+ return n
+}
+
+// importtype returns the named type declared by symbol s.
+// If no such type has been declared yet, a forward declaration is returned.
+// ipkg is the package being imported
+func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
+ n := importsym(ipkg, s, OTYPE)
+ if n.Op != OTYPE {
+ t := types.New(TFORW)
+ t.Sym = s
+ t.Nod = asTypesNode(n)
+
+ n.Op = OTYPE
+ n.Pos = pos
+ n.Type = t
+ n.SetClass(PEXTERN)
+ }
+
+ t := n.Type
+ if t == nil {
+ Fatalf("importtype %v", s)
+ }
+ return t
+}
+
+// importobj declares symbol s as an imported object representable by op.
+// ipkg is the package being imported
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op Op, ctxt Class, t *types.Type) *Node {
+ n := importsym(ipkg, s, op)
+ if n.Op != ONONAME {
+ if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
+ redeclare(lineno, s, fmt.Sprintf("during import %q", ipkg.Path))
+ }
+ return nil
+ }
+
+ n.Op = op
+ n.Pos = pos
+ n.SetClass(ctxt)
+ if ctxt == PFUNC {
+ n.Sym.SetFunc(true)
+ }
+ n.Type = t
+ return n
+}
+
+// importconst declares symbol s as an imported constant with type t and value val.
+// ipkg is the package being imported
+func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val Val) {
+ n := importobj(ipkg, pos, s, OLITERAL, PEXTERN, t)
+ if n == nil { // TODO: Check that value matches.
+ return
+ }
+
+ n.SetVal(val)
+
+ if Debug.E != 0 {
+ fmt.Printf("import const %v %L = %v\n", s, t, val)
+ }
+}
+
+// importfunc declares symbol s as an imported function with type t.
+// ipkg is the package being imported
+func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
+ n := importobj(ipkg, pos, s, ONAME, PFUNC, t)
+ if n == nil {
+ return
+ }
+
+ n.Func = new(Func)
+ t.SetNname(asTypesNode(n))
+
+ if Debug.E != 0 {
+ fmt.Printf("import func %v%S\n", s, t)
+ }
+}
+
+// importvar declares symbol s as an imported variable with type t.
+// ipkg is the package being imported
+func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
+ n := importobj(ipkg, pos, s, ONAME, PEXTERN, t)
+ if n == nil {
+ return
+ }
+
+ if Debug.E != 0 {
+ fmt.Printf("import var %v %L\n", s, t)
+ }
+}
+
+// importalias declares symbol s as an imported type alias with type t.
+// ipkg is the package being imported
+func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) {
+ n := importobj(ipkg, pos, s, OTYPE, PEXTERN, t)
+ if n == nil {
+ return
+ }
+
+ if Debug.E != 0 {
+ fmt.Printf("import type %v = %L\n", s, t)
+ }
+}
+
+func dumpasmhdr() {
+ b, err := bio.Create(asmhdr)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
+ for _, n := range asmlist {
+ if n.Sym.IsBlank() {
+ continue
+ }
+ switch n.Op {
+ case OLITERAL:
+ t := n.Val().Ctype()
+ if t == CTFLT || t == CTCPLX {
+ break
+ }
+ fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+
+ case OTYPE:
+ t := n.Type
+ if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
+ break
+ }
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+ for _, f := range t.Fields().Slice() {
+ if !f.Sym.IsBlank() {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+ }
+ }
+ }
+ }
+
+ b.Close()
+}
diff --git a/src/cmd/compile/internal/gc/fixedbugs_test.go b/src/cmd/compile/internal/gc/fixedbugs_test.go
new file mode 100644
index 0000000..8ac4436
--- /dev/null
+++ b/src/cmd/compile/internal/gc/fixedbugs_test.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+type T struct {
+ x [2]int64 // field that will be clobbered. Also makes type not SSAable.
+ p *byte // has a pointer
+}
+
+//go:noinline
+func makeT() T {
+ return T{}
+}
+
+var g T
+
+var sink interface{}
+
+func TestIssue15854(t *testing.T) {
+ for i := 0; i < 10000; i++ {
+ if g.x[0] != 0 {
+ t.Fatalf("g.x[0] clobbered with %x\n", g.x[0])
+ }
+ // The bug was in the following assignment. The return
+ // value of makeT() is not copied out of the args area of
+ // the stack frame in a timely fashion. So when write barriers
+ // are enabled, the marshaling of the args for the write
+ // barrier call clobbers the result of makeT() before it is
+ // read by the write barrier code.
+ g = makeT()
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+}
+func TestIssue15854b(t *testing.T) {
+ const N = 10000
+ a := make([]T, N)
+ for i := 0; i < N; i++ {
+ a = append(a, makeT())
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+ for i, v := range a {
+ if v.x[0] != 0 {
+ t.Fatalf("a[%d].x[0] clobbered with %x\n", i, v.x[0])
+ }
+ }
+}
+
+// Test that the generated assembly has line numbers (Issue #16214).
+func TestIssue16214(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ dir, err := ioutil.TempDir("", "TestLineNumber")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "x.go")
+ err = ioutil.WriteFile(src, []byte(issue16214src), 0644)
+ if err != nil {
+ t.Fatalf("could not write file: %v", err)
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("fail to run go tool compile: %v", err)
+ }
+
+ if strings.Contains(string(out), "unknown line number") {
+ t.Errorf("line number missing in assembly:\n%s", out)
+ }
+}
+
+var issue16214src = `
+package main
+
+func Mod32(x uint32) uint32 {
+ return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos
+}
+`
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
new file mode 100644
index 0000000..c619d25
--- /dev/null
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -0,0 +1,544 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "math"
+ "testing"
+)
+
+//go:noinline
+func compare1(a, b float64) bool {
+ return a < b
+}
+
+//go:noinline
+func compare2(a, b float32) bool {
+ return a < b
+}
+
+func TestFloatCompare(t *testing.T) {
+ if !compare1(3, 5) {
+ t.Errorf("compare1 returned false")
+ }
+ if !compare2(3, 5) {
+ t.Errorf("compare2 returned false")
+ }
+}
+
+func TestFloatCompareFolded(t *testing.T) {
+ // float64 comparisons
+ d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9)
+ if d3 == d5 {
+ t.Errorf("d3 == d5 returned true")
+ }
+ if d3 != d3 {
+ t.Errorf("d3 != d3 returned true")
+ }
+ if d3 > d5 {
+ t.Errorf("d3 > d5 returned true")
+ }
+ if d3 >= d9 {
+ t.Errorf("d3 >= d9 returned true")
+ }
+ if d5 < d1 {
+ t.Errorf("d5 < d1 returned true")
+ }
+ if d9 <= d1 {
+ t.Errorf("d9 <= d1 returned true")
+ }
+ if math.NaN() == math.NaN() {
+ t.Errorf("math.NaN() == math.NaN() returned true")
+ }
+ if math.NaN() >= math.NaN() {
+ t.Errorf("math.NaN() >= math.NaN() returned true")
+ }
+ if math.NaN() <= math.NaN() {
+ t.Errorf("math.NaN() <= math.NaN() returned true")
+ }
+ if math.Copysign(math.NaN(), -1) < math.NaN() {
+ t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true")
+ }
+ if math.Inf(1) != math.Inf(1) {
+ t.Errorf("math.Inf(1) != math.Inf(1) returned true")
+ }
+ if math.Inf(-1) != math.Inf(-1) {
+ t.Errorf("math.Inf(-1) != math.Inf(-1) returned true")
+ }
+ if math.Copysign(0, -1) != 0 {
+ t.Errorf("math.Copysign(0, -1) != 0 returned true")
+ }
+ if math.Copysign(0, -1) < 0 {
+ t.Errorf("math.Copysign(0, -1) < 0 returned true")
+ }
+ if 0 > math.Copysign(0, -1) {
+ t.Errorf("0 > math.Copysign(0, -1) returned true")
+ }
+
+ // float32 comparisons
+ s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9)
+ if s3 == s5 {
+ t.Errorf("s3 == s5 returned true")
+ }
+ if s3 != s3 {
+ t.Errorf("s3 != s3 returned true")
+ }
+ if s3 > s5 {
+ t.Errorf("s3 > s5 returned true")
+ }
+ if s3 >= s9 {
+ t.Errorf("s3 >= s9 returned true")
+ }
+ if s5 < s1 {
+ t.Errorf("s5 < s1 returned true")
+ }
+ if s9 <= s1 {
+ t.Errorf("s9 <= s1 returned true")
+ }
+ sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1))
+ if sPosNaN == sPosNaN {
+ t.Errorf("sPosNaN == sPosNaN returned true")
+ }
+ if sPosNaN >= sPosNaN {
+ t.Errorf("sPosNaN >= sPosNaN returned true")
+ }
+ if sPosNaN <= sPosNaN {
+ t.Errorf("sPosNaN <= sPosNaN returned true")
+ }
+ if sNegNaN < sPosNaN {
+ t.Errorf("sNegNaN < sPosNaN returned true")
+ }
+ sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1))
+ if sPosInf != sPosInf {
+ t.Errorf("sPosInf != sPosInf returned true")
+ }
+ if sNegInf != sNegInf {
+ t.Errorf("sNegInf != sNegInf returned true")
+ }
+ sNegZero := float32(math.Copysign(0, -1))
+ if sNegZero != 0 {
+ t.Errorf("sNegZero != 0 returned true")
+ }
+ if sNegZero < 0 {
+ t.Errorf("sNegZero < 0 returned true")
+ }
+ if 0 > sNegZero {
+ t.Errorf("0 > sNegZero returned true")
+ }
+}
+
+//go:noinline
+func cvt1(a float64) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt2(a float64) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt3(a float32) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt4(a float32) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt5(a float64) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt6(a float64) int32 {
+ return int32(a)
+}
+
+//go:noinline
+func cvt7(a float32) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt8(a float32) int32 {
+ return int32(a)
+}
+
+// make sure to cover int, uint cases (issue #16738)
+//go:noinline
+func cvt9(a float64) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt10(a float64) uint {
+ return uint(a)
+}
+
+//go:noinline
+func cvt11(a float32) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt12(a float32) uint {
+ return uint(a)
+}
+
+//go:noinline
+func f2i64p(v float64) *int64 {
+ return ip64(int64(v / 0.1))
+}
+
+//go:noinline
+func ip64(v int64) *int64 {
+ return &v
+}
+
+func TestFloatConvert(t *testing.T) {
+ if got := cvt1(3.5); got != 3 {
+ t.Errorf("cvt1 got %d, wanted 3", got)
+ }
+ if got := cvt2(3.5); got != 3 {
+ t.Errorf("cvt2 got %d, wanted 3", got)
+ }
+ if got := cvt3(3.5); got != 3 {
+ t.Errorf("cvt3 got %d, wanted 3", got)
+ }
+ if got := cvt4(3.5); got != 3 {
+ t.Errorf("cvt4 got %d, wanted 3", got)
+ }
+ if got := cvt5(3.5); got != 3 {
+ t.Errorf("cvt5 got %d, wanted 3", got)
+ }
+ if got := cvt6(3.5); got != 3 {
+ t.Errorf("cvt6 got %d, wanted 3", got)
+ }
+ if got := cvt7(3.5); got != 3 {
+ t.Errorf("cvt7 got %d, wanted 3", got)
+ }
+ if got := cvt8(3.5); got != 3 {
+ t.Errorf("cvt8 got %d, wanted 3", got)
+ }
+ if got := cvt9(3.5); got != 3 {
+ t.Errorf("cvt9 got %d, wanted 3", got)
+ }
+ if got := cvt10(3.5); got != 3 {
+ t.Errorf("cvt10 got %d, wanted 3", got)
+ }
+ if got := cvt11(3.5); got != 3 {
+ t.Errorf("cvt11 got %d, wanted 3", got)
+ }
+ if got := cvt12(3.5); got != 3 {
+ t.Errorf("cvt12 got %d, wanted 3", got)
+ }
+ if got := *f2i64p(10); got != 100 {
+ t.Errorf("f2i64p got %d, wanted 100", got)
+ }
+}
+
+func TestFloatConvertFolded(t *testing.T) {
+ // Assign constants to variables so that they are (hopefully) constant folded
+ // by the SSA backend rather than the frontend.
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7)
+ i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7)
+ du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7)
+ di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7)
+ su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7)
+ si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7)
+
+ // integer to float
+ if float64(u64) != du64 {
+ t.Errorf("float64(u64) != du64")
+ }
+ if float64(u32) != du32 {
+ t.Errorf("float64(u32) != du32")
+ }
+ if float64(u16) != du16 {
+ t.Errorf("float64(u16) != du16")
+ }
+ if float64(u8) != du8 {
+ t.Errorf("float64(u8) != du8")
+ }
+ if float64(i64) != di64 {
+ t.Errorf("float64(i64) != di64")
+ }
+ if float64(i32) != di32 {
+ t.Errorf("float64(i32) != di32")
+ }
+ if float64(i16) != di16 {
+ t.Errorf("float64(i16) != di16")
+ }
+ if float64(i8) != di8 {
+ t.Errorf("float64(i8) != di8")
+ }
+ if float32(u64) != su64 {
+ t.Errorf("float32(u64) != su64")
+ }
+ if float32(u32) != su32 {
+ t.Errorf("float32(u32) != su32")
+ }
+ if float32(u16) != su16 {
+ t.Errorf("float32(u16) != su16")
+ }
+ if float32(u8) != su8 {
+ t.Errorf("float32(u8) != su8")
+ }
+ if float32(i64) != si64 {
+ t.Errorf("float32(i64) != si64")
+ }
+ if float32(i32) != si32 {
+ t.Errorf("float32(i32) != si32")
+ }
+ if float32(i16) != si16 {
+ t.Errorf("float32(i16) != si16")
+ }
+ if float32(i8) != si8 {
+ t.Errorf("float32(i8) != si8")
+ }
+
+ // float to integer
+ if uint64(du64) != u64 {
+ t.Errorf("uint64(du64) != u64")
+ }
+ if uint32(du32) != u32 {
+ t.Errorf("uint32(du32) != u32")
+ }
+ if uint16(du16) != u16 {
+ t.Errorf("uint16(du16) != u16")
+ }
+ if uint8(du8) != u8 {
+ t.Errorf("uint8(du8) != u8")
+ }
+ if int64(di64) != i64 {
+ t.Errorf("int64(di64) != i64")
+ }
+ if int32(di32) != i32 {
+ t.Errorf("int32(di32) != i32")
+ }
+ if int16(di16) != i16 {
+ t.Errorf("int16(di16) != i16")
+ }
+ if int8(di8) != i8 {
+ t.Errorf("int8(di8) != i8")
+ }
+ if uint64(su64) != u64 {
+ t.Errorf("uint64(su64) != u64")
+ }
+ if uint32(su32) != u32 {
+ t.Errorf("uint32(su32) != u32")
+ }
+ if uint16(su16) != u16 {
+ t.Errorf("uint16(su16) != u16")
+ }
+ if uint8(su8) != u8 {
+ t.Errorf("uint8(su8) != u8")
+ }
+ if int64(si64) != i64 {
+ t.Errorf("int64(si64) != i64")
+ }
+ if int32(si32) != i32 {
+ t.Errorf("int32(si32) != i32")
+ }
+ if int16(si16) != i16 {
+ t.Errorf("int16(si16) != i16")
+ }
+ if int8(si8) != i8 {
+ t.Errorf("int8(si8) != i8")
+ }
+}
+
+func TestFloat32StoreToLoadConstantFold(t *testing.T) {
+ // Test that math.Float32{,from}bits constant fold correctly.
+ // In particular we need to be careful that signaling NaN (sNaN) values
+ // are not converted to quiet NaN (qNaN) values during compilation.
+ // See issue #27193 for more information.
+
+ // signaling NaNs
+ {
+ const nan = uint32(0x7f800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xff800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xffbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // quiet NaNs
+ {
+ const nan = uint32(0x7fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // infinities
+ {
+ const inf = uint32(0x7f800000) // +∞
+ if x := math.Float32bits(math.Float32frombits(inf)); x != inf {
+ t.Errorf("got %#x, want %#x", x, inf)
+ }
+ }
+ {
+ const negInf = uint32(0xff800000) // -∞
+ if x := math.Float32bits(math.Float32frombits(negInf)); x != negInf {
+ t.Errorf("got %#x, want %#x", x, negInf)
+ }
+ }
+
+ // numbers
+ {
+ const zero = uint32(0) // +0.0
+ if x := math.Float32bits(math.Float32frombits(zero)); x != zero {
+ t.Errorf("got %#x, want %#x", x, zero)
+ }
+ }
+ {
+ const negZero = uint32(1 << 31) // -0.0
+ if x := math.Float32bits(math.Float32frombits(negZero)); x != negZero {
+ t.Errorf("got %#x, want %#x", x, negZero)
+ }
+ }
+ {
+ const one = uint32(0x3f800000) // 1.0
+ if x := math.Float32bits(math.Float32frombits(one)); x != one {
+ t.Errorf("got %#x, want %#x", x, one)
+ }
+ }
+ {
+ const negOne = uint32(0xbf800000) // -1.0
+ if x := math.Float32bits(math.Float32frombits(negOne)); x != negOne {
+ t.Errorf("got %#x, want %#x", x, negOne)
+ }
+ }
+ {
+ const frac = uint32(0x3fc00000) // +1.5
+ if x := math.Float32bits(math.Float32frombits(frac)); x != frac {
+ t.Errorf("got %#x, want %#x", x, frac)
+ }
+ }
+ {
+ const negFrac = uint32(0xbfc00000) // -1.5
+ if x := math.Float32bits(math.Float32frombits(negFrac)); x != negFrac {
+ t.Errorf("got %#x, want %#x", x, negFrac)
+ }
+ }
+}
+
+// Signaling NaN values as constants.
+const (
+ snan32bits uint32 = 0x7f800001
+ snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+ // Make sure we generate a signaling NaN from a constant properly.
+ // See issue 36400.
+ f32 := math.Float32frombits(snan32bits)
+ g32 := math.Float32frombits(snan32bitsVar)
+ x32 := math.Float32bits(f32)
+ y32 := math.Float32bits(g32)
+ if x32 != y32 {
+ t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+ }
+
+ f64 := math.Float64frombits(snan64bits)
+ g64 := math.Float64frombits(snan64bitsVar)
+ x64 := math.Float64bits(f64)
+ y64 := math.Float64bits(g64)
+ if x64 != y64 {
+ t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+ }
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, we get a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399.
+ s32 := math.Float32frombits(snan32bitsVar)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bitsVar)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, it converts to a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399 and 36400.
+ s32 := math.Float32frombits(snan32bits)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bits)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+var sinkFloat float64
+
+func BenchmarkMul2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= 2
+ }
+ sinkFloat = m
+ }
+}
+func BenchmarkMulNeg2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= -2
+ }
+ sinkFloat = m
+ }
+}
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
new file mode 100644
index 0000000..f92f5d0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -0,0 +1,1986 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// A FmtFlag value is a set of flags (or 0).
+// They control how the Xconv functions format their values.
+// See the respective function's documentation for details.
+type FmtFlag int
+
+const ( // fmt.Format flag/prec or verb
+ FmtLeft FmtFlag = 1 << iota // '-'
+ FmtSharp // '#'
+ FmtSign // '+'
+ FmtUnsigned // internal use only (historic: u flag)
+ FmtShort // verb == 'S' (historic: h flag)
+ FmtLong // verb == 'L' (historic: l flag)
+ FmtComma // '.' (== hasPrec) (historic: , flag)
+ FmtByte // '0' (historic: hh flag)
+)
+
+// fmtFlag computes the (internal) FmtFlag
+// value given the fmt.State and format verb.
+func fmtFlag(s fmt.State, verb rune) FmtFlag {
+ var flag FmtFlag
+ if s.Flag('-') {
+ flag |= FmtLeft
+ }
+ if s.Flag('#') {
+ flag |= FmtSharp
+ }
+ if s.Flag('+') {
+ flag |= FmtSign
+ }
+ if s.Flag(' ') {
+ Fatalf("FmtUnsigned in format string")
+ }
+ if _, ok := s.Precision(); ok {
+ flag |= FmtComma
+ }
+ if s.Flag('0') {
+ flag |= FmtByte
+ }
+ switch verb {
+ case 'S':
+ flag |= FmtShort
+ case 'L':
+ flag |= FmtLong
+ }
+ return flag
+}
+
+// Format conversions:
+// TODO(gri) verify these; eliminate those not used anymore
+//
+// %v Op Node opcodes
+// Flags: #: print Go syntax (automatic unless mode == FDbg)
+//
+// %j *Node Node details
+// Flags: 0: suppresses things not relevant until walk
+//
+// %v *Val Constant values
+//
+// %v *types.Sym Symbols
+// %S unqualified identifier in any mode
+// Flags: +,- #: mode (see below)
+// 0: in export mode: unqualified identifier if exported, qualified if not
+//
+// %v *types.Type Types
+// %S omit "func" and receiver in function types
+// %L definition instead of name.
+// Flags: +,- #: mode (see below)
+// ' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
+//
+// %v *Node Nodes
+// %S (only in +/debug mode) suppress recursion
+// %L (only in Error mode) print "foo (type Bar)"
+// Flags: +,- #: mode (see below)
+//
+// %v Nodes Node lists
+// Flags: those of *Node
+// .: separate items with ',' instead of ';'
+
+// *types.Sym, *types.Type, and *Node types use the flags below to set the format mode
+const (
+ FErr fmtMode = iota
+ FDbg
+ FTypeId
+ FTypeIdName // same as FTypeId, but use package name instead of prefix
+)
+
+// The mode flags '+', '-', and '#' are sticky; they persist through
+// recursions of *Node, *types.Type, and *types.Sym values. The ' ' flag is
+// sticky only on *types.Type recursions and only used in %-/*types.Sym mode.
+//
+// Example: given a *types.Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
+
+// Useful format combinations:
+// TODO(gri): verify these
+//
+// *Node, Nodes:
+// %+v multiline recursive debug dump of *Node/Nodes
+// %+S non-recursive debug dump
+//
+// *Node:
+// %#v Go format
+// %L "foo (type Bar)" for error messages
+//
+// *types.Type:
+// %#v Go format
+// %#L type definition instead of name
+// %#S omit "func" and receiver in function signature
+//
+// %-v type identifiers
+// %-S type identifiers without "func" and arg names in type signatures (methodsym)
+// %- v type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
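For orientation, a hedged sketch of how these sticky flags select a mode for a symbol value s (illustrative only, not part of the patch; see fmtFlag and FmtFlag.update below); the exact qualification also depends on which package s belongs to:

// fmt.Sprintf("%v", s)  -> FErr:    user-facing; local/builtin symbols print unqualified
// fmt.Sprintf("%+v", s) -> FDbg:    always qualified as "pkgname.Name"
// fmt.Sprintf("%-v", s) -> FTypeId: qualified with the package prefix (type/linker symbols)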
+
+// update returns the results of applying f to mode.
+func (f FmtFlag) update(mode fmtMode) (FmtFlag, fmtMode) {
+ switch {
+ case f&FmtSign != 0:
+ mode = FDbg
+ case f&FmtSharp != 0:
+ // ignore (textual export format no longer supported)
+ case f&FmtUnsigned != 0:
+ mode = FTypeIdName
+ case f&FmtLeft != 0:
+ mode = FTypeId
+ }
+
+ f &^= FmtSharp | FmtLeft | FmtSign
+ return f, mode
+}
+
+var goopnames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OALIGNOF: "unsafe.Alignof",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OBITNOT: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+ OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.892
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINLMARK: "inlmark",
+ ODEREF: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ ONEG: "-",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOFFSETOF: "unsafe.Offsetof",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSIZEOF: "unsafe.Sizeof",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OXOR: "^",
+}
+
+func (o Op) GoString() string {
+ return fmt.Sprintf("%#v", o)
+}
+
+func (o Op) format(s fmt.State, verb rune, mode fmtMode) {
+ switch verb {
+ case 'v':
+ o.oconv(s, fmtFlag(s, verb), mode)
+
+ default:
+ fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
+ }
+}
+
+func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+ if flag&FmtSharp != 0 || mode != FDbg {
+ if int(o) < len(goopnames) && goopnames[o] != "" {
+ fmt.Fprint(s, goopnames[o])
+ return
+ }
+ }
+
+ // 'o.String()' instead of just 'o' to avoid infinite recursion
+ fmt.Fprint(s, o.String())
+}
+
+type (
+ fmtMode int
+
+ fmtNodeErr Node
+ fmtNodeDbg Node
+ fmtNodeTypeId Node
+ fmtNodeTypeIdName Node
+
+ fmtOpErr Op
+ fmtOpDbg Op
+ fmtOpTypeId Op
+ fmtOpTypeIdName Op
+
+ fmtTypeErr types.Type
+ fmtTypeDbg types.Type
+ fmtTypeTypeId types.Type
+ fmtTypeTypeIdName types.Type
+
+ fmtSymErr types.Sym
+ fmtSymDbg types.Sym
+ fmtSymTypeId types.Sym
+ fmtSymTypeIdName types.Sym
+
+ fmtNodesErr Nodes
+ fmtNodesDbg Nodes
+ fmtNodesTypeId Nodes
+ fmtNodesTypeIdName Nodes
+)
+
+func (n *fmtNodeErr) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FErr) }
+func (n *fmtNodeDbg) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FDbg) }
+func (n *fmtNodeTypeId) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeId) }
+func (n *fmtNodeTypeIdName) Format(s fmt.State, verb rune) { (*Node)(n).format(s, verb, FTypeIdName) }
+func (n *Node) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
+
+func (o fmtOpErr) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FErr) }
+func (o fmtOpDbg) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FDbg) }
+func (o fmtOpTypeId) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeId) }
+func (o fmtOpTypeIdName) Format(s fmt.State, verb rune) { Op(o).format(s, verb, FTypeIdName) }
+func (o Op) Format(s fmt.State, verb rune) { o.format(s, verb, FErr) }
+
+func (t *fmtTypeErr) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FErr) }
+func (t *fmtTypeDbg) Format(s fmt.State, verb rune) { typeFormat((*types.Type)(t), s, verb, FDbg) }
+func (t *fmtTypeTypeId) Format(s fmt.State, verb rune) {
+ typeFormat((*types.Type)(t), s, verb, FTypeId)
+}
+func (t *fmtTypeTypeIdName) Format(s fmt.State, verb rune) {
+ typeFormat((*types.Type)(t), s, verb, FTypeIdName)
+}
+
+// func (t *types.Type) Format(s fmt.State, verb rune) // in package types
+
+func (y *fmtSymErr) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FErr) }
+func (y *fmtSymDbg) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FDbg) }
+func (y *fmtSymTypeId) Format(s fmt.State, verb rune) { symFormat((*types.Sym)(y), s, verb, FTypeId) }
+func (y *fmtSymTypeIdName) Format(s fmt.State, verb rune) {
+ symFormat((*types.Sym)(y), s, verb, FTypeIdName)
+}
+
+// func (y *types.Sym) Format(s fmt.State, verb rune) // in package types { y.format(s, verb, FErr) }
+
+func (n fmtNodesErr) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FErr) }
+func (n fmtNodesDbg) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FDbg) }
+func (n fmtNodesTypeId) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeId) }
+func (n fmtNodesTypeIdName) Format(s fmt.State, verb rune) { (Nodes)(n).format(s, verb, FTypeIdName) }
+func (n Nodes) Format(s fmt.State, verb rune) { n.format(s, verb, FErr) }
+
+func (m fmtMode) Fprintf(s fmt.State, format string, args ...interface{}) {
+ m.prepareArgs(args)
+ fmt.Fprintf(s, format, args...)
+}
+
+func (m fmtMode) Sprintf(format string, args ...interface{}) string {
+ m.prepareArgs(args)
+ return fmt.Sprintf(format, args...)
+}
+
+func (m fmtMode) Sprint(args ...interface{}) string {
+ m.prepareArgs(args)
+ return fmt.Sprint(args...)
+}
+
+func (m fmtMode) prepareArgs(args []interface{}) {
+ switch m {
+ case FErr:
+ for i, arg := range args {
+ switch arg := arg.(type) {
+ case Op:
+ args[i] = fmtOpErr(arg)
+ case *Node:
+ args[i] = (*fmtNodeErr)(arg)
+ case *types.Type:
+ args[i] = (*fmtTypeErr)(arg)
+ case *types.Sym:
+ args[i] = (*fmtSymErr)(arg)
+ case Nodes:
+ args[i] = fmtNodesErr(arg)
+ case Val, int32, int64, string, types.EType:
+ // OK: printing these types doesn't depend on mode
+ default:
+ Fatalf("mode.prepareArgs type %T", arg)
+ }
+ }
+ case FDbg:
+ for i, arg := range args {
+ switch arg := arg.(type) {
+ case Op:
+ args[i] = fmtOpDbg(arg)
+ case *Node:
+ args[i] = (*fmtNodeDbg)(arg)
+ case *types.Type:
+ args[i] = (*fmtTypeDbg)(arg)
+ case *types.Sym:
+ args[i] = (*fmtSymDbg)(arg)
+ case Nodes:
+ args[i] = fmtNodesDbg(arg)
+ case Val, int32, int64, string, types.EType:
+ // OK: printing these types doesn't depend on mode
+ default:
+ Fatalf("mode.prepareArgs type %T", arg)
+ }
+ }
+ case FTypeId:
+ for i, arg := range args {
+ switch arg := arg.(type) {
+ case Op:
+ args[i] = fmtOpTypeId(arg)
+ case *Node:
+ args[i] = (*fmtNodeTypeId)(arg)
+ case *types.Type:
+ args[i] = (*fmtTypeTypeId)(arg)
+ case *types.Sym:
+ args[i] = (*fmtSymTypeId)(arg)
+ case Nodes:
+ args[i] = fmtNodesTypeId(arg)
+ case Val, int32, int64, string, types.EType:
+ // OK: printing these types doesn't depend on mode
+ default:
+ Fatalf("mode.prepareArgs type %T", arg)
+ }
+ }
+ case FTypeIdName:
+ for i, arg := range args {
+ switch arg := arg.(type) {
+ case Op:
+ args[i] = fmtOpTypeIdName(arg)
+ case *Node:
+ args[i] = (*fmtNodeTypeIdName)(arg)
+ case *types.Type:
+ args[i] = (*fmtTypeTypeIdName)(arg)
+ case *types.Sym:
+ args[i] = (*fmtSymTypeIdName)(arg)
+ case Nodes:
+ args[i] = fmtNodesTypeIdName(arg)
+ case Val, int32, int64, string, types.EType:
+ // OK: printing these types doesn't depend on mode
+ default:
+ Fatalf("mode.prepareArgs type %T", arg)
+ }
+ }
+ default:
+ Fatalf("mode.prepareArgs mode %d", m)
+ }
+}
+
+func (n *Node) format(s fmt.State, verb rune, mode fmtMode) {
+ switch verb {
+ case 'v', 'S', 'L':
+ n.nconv(s, fmtFlag(s, verb), mode)
+
+ case 'j':
+ n.jconv(s, fmtFlag(s, verb))
+
+ default:
+ fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
+ }
+}
+
+// *Node details
+func (n *Node) jconv(s fmt.State, flag FmtFlag) {
+ c := flag & FmtShort
+
+ // Useful to see which nodes in a Node Dump/dumplist are actually identical
+ if Debug_dumpptrs != 0 {
+ fmt.Fprintf(s, " p(%p)", n)
+ }
+ if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
+ fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
+ }
+
+ if Debug_dumpptrs != 0 && c == 0 && n.Name != nil && n.Name.Defn != nil {
+ // Useful to see where Defn is set and what node it points to
+ fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
+ }
+
+ if n.Pos.IsKnown() {
+ pfx := ""
+ switch n.Pos.IsStmt() {
+ case src.PosNotStmt:
+ pfx = "_" // "-" would be confusing
+ case src.PosIsStmt:
+ pfx = "+"
+ }
+ fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
+ }
+
+ if c == 0 && n.Xoffset != BADWIDTH {
+ fmt.Fprintf(s, " x(%d)", n.Xoffset)
+ }
+
+ if n.Class() != 0 {
+ fmt.Fprintf(s, " class(%v)", n.Class())
+ }
+
+ if n.Colas() {
+ fmt.Fprintf(s, " colas(%v)", n.Colas())
+ }
+
+ switch n.Esc {
+ case EscUnknown:
+ break
+
+ case EscHeap:
+ fmt.Fprint(s, " esc(h)")
+
+ case EscNone:
+ fmt.Fprint(s, " esc(no)")
+
+ case EscNever:
+ if c == 0 {
+ fmt.Fprint(s, " esc(N)")
+ }
+
+ default:
+ fmt.Fprintf(s, " esc(%d)", n.Esc)
+ }
+
+ if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
+ fmt.Fprintf(s, " ld(%d)", e.loopDepth)
+ }
+
+ if c == 0 && n.Typecheck() != 0 {
+ fmt.Fprintf(s, " tc(%d)", n.Typecheck())
+ }
+
+ if n.IsDDD() {
+ fmt.Fprintf(s, " isddd(%v)", n.IsDDD())
+ }
+
+ if n.Implicit() {
+ fmt.Fprintf(s, " implicit(%v)", n.Implicit())
+ }
+
+ if n.Embedded() {
+ fmt.Fprintf(s, " embedded")
+ }
+
+ if n.Op == ONAME {
+ if n.Name.Addrtaken() {
+ fmt.Fprint(s, " addrtaken")
+ }
+ if n.Name.Assigned() {
+ fmt.Fprint(s, " assigned")
+ }
+ if n.Name.IsClosureVar() {
+ fmt.Fprint(s, " closurevar")
+ }
+ if n.Name.Captured() {
+ fmt.Fprint(s, " captured")
+ }
+ if n.Name.IsOutputParamHeapAddr() {
+ fmt.Fprint(s, " outputparamheapaddr")
+ }
+ }
+ if n.Bounded() {
+ fmt.Fprint(s, " bounded")
+ }
+ if n.NonNil() {
+ fmt.Fprint(s, " nonnil")
+ }
+
+ if c == 0 && n.HasCall() {
+ fmt.Fprint(s, " hascall")
+ }
+
+ if c == 0 && n.Name != nil && n.Name.Used() {
+ fmt.Fprint(s, " used")
+ }
+}
+
+func (v Val) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ v.vconv(s, fmtFlag(s, verb))
+
+ default:
+ fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
+ }
+}
+
+func (v Val) vconv(s fmt.State, flag FmtFlag) {
+ switch u := v.U.(type) {
+ case *Mpint:
+ if !u.Rune {
+ if flag&FmtSharp != 0 {
+ fmt.Fprint(s, u.String())
+ return
+ }
+ fmt.Fprint(s, u.GoString())
+ return
+ }
+
+ switch x := u.Int64(); {
+ case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
+ fmt.Fprintf(s, "'%c'", int(x))
+
+ case 0 <= x && x < 1<<16:
+ fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
+
+ case 0 <= x && x <= utf8.MaxRune:
+ fmt.Fprintf(s, "'\\U%08x'", uint64(x))
+
+ default:
+ fmt.Fprintf(s, "('\\x00' + %v)", u)
+ }
+
+ case *Mpflt:
+ if flag&FmtSharp != 0 {
+ fmt.Fprint(s, u.String())
+ return
+ }
+ fmt.Fprint(s, u.GoString())
+ return
+
+ case *Mpcplx:
+ if flag&FmtSharp != 0 {
+ fmt.Fprint(s, u.String())
+ return
+ }
+ fmt.Fprint(s, u.GoString())
+ return
+
+ case string:
+ fmt.Fprint(s, strconv.Quote(u))
+
+ case bool:
+ fmt.Fprint(s, u)
+
+ case *NilVal:
+ fmt.Fprint(s, "nil")
+
+ default:
+ fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
+ }
+}
+
+/*
+s%,%,\n%g
+s%\n+%\n%g
+s%^[ ]*T%%g
+s%,.*%%g
+s%.+% [T&] = "&",%g
+s%^ ........*\]%&~%g
+s%~ %%g
+*/
+
+func symfmt(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
+ if flag&FmtShort == 0 {
+ switch mode {
+ case FErr: // This is for the user
+ if s.Pkg == builtinpkg || s.Pkg == localpkg {
+ b.WriteString(s.Name)
+ return
+ }
+
+ // If the name was used by multiple packages, display the full path.
+ if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
+ fmt.Fprintf(b, "%q.%s", s.Pkg.Path, s.Name)
+ return
+ }
+ b.WriteString(s.Pkg.Name)
+ b.WriteByte('.')
+ b.WriteString(s.Name)
+ return
+
+ case FDbg:
+ b.WriteString(s.Pkg.Name)
+ b.WriteByte('.')
+ b.WriteString(s.Name)
+ return
+
+ case FTypeIdName:
+ // dcommontype, typehash
+ b.WriteString(s.Pkg.Name)
+ b.WriteByte('.')
+ b.WriteString(s.Name)
+ return
+
+ case FTypeId:
+ // (methodsym), typesym, weaksym
+ b.WriteString(s.Pkg.Prefix)
+ b.WriteByte('.')
+ b.WriteString(s.Name)
+ return
+ }
+ }
+
+ if flag&FmtByte != 0 {
+ // FmtByte (hh) implies FmtShort (h)
+ // skip leading "type." in method name
+ name := s.Name
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[i+1:]
+ }
+
+ if mode == FDbg {
+ fmt.Fprintf(b, "@%q.%s", s.Pkg.Path, name)
+ return
+ }
+
+ b.WriteString(name)
+ return
+ }
+
+ b.WriteString(s.Name)
+}
+
+var basicnames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
+var fmtBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func tconv(t *types.Type, flag FmtFlag, mode fmtMode) string {
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ tconv2(buf, t, flag, mode, nil)
+ return types.InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// flag and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited map[*types.Type]int) {
+ if off, ok := visited[t]; ok {
+ // We've seen this type before, so we're trying to print it recursively.
+ // Print a reference to it instead.
+ fmt.Fprintf(b, "@%d", off)
+ return
+ }
+ if t == nil {
+ b.WriteString("<T>")
+ return
+ }
+ if t.Etype == types.TSSA {
+ b.WriteString(t.Extra.(string))
+ return
+ }
+ if t.Etype == types.TTUPLE {
+ b.WriteString(t.FieldType(0).String())
+ b.WriteByte(',')
+ b.WriteString(t.FieldType(1).String())
+ return
+ }
+
+ if t.Etype == types.TRESULTS {
+ tys := t.Extra.(*types.Results).Types
+ for i, et := range tys {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ b.WriteString(et.String())
+ }
+ return
+ }
+
+ flag, mode = flag.update(mode)
+ if mode == FTypeIdName {
+ flag |= FmtUnsigned
+ }
+ if t == types.Bytetype || t == types.Runetype {
+ // in %-T mode collapse rune and byte with their originals.
+ switch mode {
+ case FTypeIdName, FTypeId:
+ t = types.Types[t.Etype]
+ default:
+ sconv2(b, t.Sym, FmtShort, mode)
+ return
+ }
+ }
+ if t == types.Errortype {
+ b.WriteString("error")
+ return
+ }
+
+ // Unless the 'L' flag was specified, if the type has a name, just print that name.
+ if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
+ switch mode {
+ case FTypeId, FTypeIdName:
+ if flag&FmtShort != 0 {
+ if t.Vargen != 0 {
+ sconv2(b, t.Sym, FmtShort, mode)
+ fmt.Fprintf(b, "·%d", t.Vargen)
+ return
+ }
+ sconv2(b, t.Sym, FmtShort, mode)
+ return
+ }
+
+ if mode == FTypeIdName {
+ sconv2(b, t.Sym, FmtUnsigned, mode)
+ return
+ }
+
+ if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+ b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
+ return
+ }
+ }
+
+ sconv2(b, t.Sym, 0, mode)
+ return
+ }
+
+ if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+ var name string
+ switch t {
+ case types.UntypedBool:
+ name = "untyped bool"
+ case types.UntypedString:
+ name = "untyped string"
+ case types.UntypedInt:
+ name = "untyped int"
+ case types.UntypedRune:
+ name = "untyped rune"
+ case types.UntypedFloat:
+ name = "untyped float"
+ case types.UntypedComplex:
+ name = "untyped complex"
+ default:
+ name = basicnames[t.Etype]
+ }
+ b.WriteString(name)
+ return
+ }
+
+ if mode == FDbg {
+ b.WriteString(t.Etype.String())
+ b.WriteByte('-')
+ tconv2(b, t, flag, FErr, visited)
+ return
+ }
+
+ // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+ // try to print it recursively.
+ // We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+ // point for any later references to the same type.
+ // Note that we remove the type from the visited map as soon as the recursive call is done.
+ // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+ // but I'd like to use the @ notation only when strictly necessary.)
+ if visited == nil {
+ visited = map[*types.Type]int{}
+ }
+ visited[t] = b.Len()
+ defer delete(visited, t)
+
+ switch t.Etype {
+ case TPTR:
+ b.WriteByte('*')
+ switch mode {
+ case FTypeId, FTypeIdName:
+ if flag&FmtShort != 0 {
+ tconv2(b, t.Elem(), FmtShort, mode, visited)
+ return
+ }
+ }
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TARRAY:
+ b.WriteByte('[')
+ b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TSLICE:
+ b.WriteString("[]")
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TCHAN:
+ switch t.ChanDir() {
+ case types.Crecv:
+ b.WriteString("<-chan ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ case types.Csend:
+ b.WriteString("chan<- ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ default:
+ b.WriteString("chan ")
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
+ b.WriteByte('(')
+ tconv2(b, t.Elem(), 0, mode, visited)
+ b.WriteByte(')')
+ } else {
+ tconv2(b, t.Elem(), 0, mode, visited)
+ }
+ }
+
+ case TMAP:
+ b.WriteString("map[")
+ tconv2(b, t.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ b.WriteString("interface {}")
+ break
+ }
+ b.WriteString("interface {")
+ for i, f := range t.Fields().Slice() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ switch {
+ case f.Sym == nil:
+ // Check first that a symbol is defined for this type.
+ // Wrong interface definitions may have types lacking a symbol.
+ break
+ case types.IsExported(f.Sym.Name):
+ sconv2(b, f.Sym, FmtShort, mode)
+ default:
+ flag1 := FmtLeft
+ if flag&FmtUnsigned != 0 {
+ flag1 = FmtUnsigned
+ }
+ sconv2(b, f.Sym, flag1, mode)
+ }
+ tconv2(b, f.Type, FmtShort, mode, visited)
+ }
+ if t.NumFields() != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+
+ case TFUNC:
+ if flag&FmtShort != 0 {
+ // no leading func
+ } else {
+ if t.Recv() != nil {
+ b.WriteString("method")
+ tconv2(b, t.Recvs(), 0, mode, visited)
+ b.WriteByte(' ')
+ }
+ b.WriteString("func")
+ }
+ tconv2(b, t.Params(), 0, mode, visited)
+
+ switch t.NumResults() {
+ case 0:
+ // nothing to do
+
+ case 1:
+ b.WriteByte(' ')
+ tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+
+ default:
+ b.WriteByte(' ')
+ tconv2(b, t.Results(), 0, mode, visited)
+ }
+
+ case TSTRUCT:
+ if m := t.StructType().Map; m != nil {
+ mt := m.MapType()
+ // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // This avoids a recursive print that generates very long names.
+ switch t {
+ case mt.Bucket:
+ b.WriteString("map.bucket[")
+ case mt.Hmap:
+ b.WriteString("map.hdr[")
+ case mt.Hiter:
+ b.WriteString("map.iter[")
+ default:
+ Fatalf("unknown internal map type")
+ }
+ tconv2(b, m.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, m.Elem(), 0, mode, visited)
+ break
+ }
+
+ if funarg := t.StructType().Funarg; funarg != types.FunargNone {
+ b.WriteByte('(')
+ var flag1 FmtFlag
+ switch mode {
+ case FTypeId, FTypeIdName, FErr:
+ // no argument names on function signature, and no "noescape"/"nosplit" tags
+ flag1 = FmtShort
+ }
+ for i, f := range t.Fields().Slice() {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ fldconv(b, f, flag1, mode, visited, funarg)
+ }
+ b.WriteByte(')')
+ } else {
+ b.WriteString("struct {")
+ for i, f := range t.Fields().Slice() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ fldconv(b, f, FmtLong, mode, visited, funarg)
+ }
+ if t.NumFields() != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+ }
+
+ case TFORW:
+ b.WriteString("undefined")
+ if t.Sym != nil {
+ b.WriteByte(' ')
+ sconv2(b, t.Sym, 0, mode)
+ }
+
+ case TUNSAFEPTR:
+ b.WriteString("unsafe.Pointer")
+
+ case Txxx:
+ b.WriteString("Txxx")
+ default:
+ // Don't know how to handle - fall back to detailed prints.
+ b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym))
+ }
+}
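The visited map used above is a general cycle-breaking pattern for recursive printers. A minimal, self-contained sketch of the same idea, using a hypothetical node type rather than *types.Type (illustrative, not part of the patch):

package main

import (
	"bytes"
	"fmt"
)

type node struct{ next *node }

// render prints a possibly-cyclic list, emitting "@off" when it revisits a node,
// mirroring how tconv2 records b.Len() in visited before recursing.
func render(b *bytes.Buffer, n *node, visited map[*node]int) {
	if off, ok := visited[n]; ok {
		fmt.Fprintf(b, "@%d", off)
		return
	}
	visited[n] = b.Len()
	defer delete(visited, n)
	b.WriteString("node(")
	if n.next != nil {
		render(b, n.next, visited)
	} else {
		b.WriteString("nil")
	}
	b.WriteString(")")
}

func main() {
	n := &node{}
	n.next = n // a cycle
	var b bytes.Buffer
	render(&b, n, map[*node]int{})
	fmt.Println(b.String()) // node(@0)
}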
+
+// Statements which may be rendered with a simplestmt as init.
+func stmtwithinit(op Op) bool {
+ switch op {
+ case OIF, OFOR, OFORUNTIL, OSWITCH:
+ return true
+ }
+
+ return false
+}
+
+func (n *Node) stmtfmt(s fmt.State, mode fmtMode) {
+ // some statements allow for an init, but at most one; however, we may have
+ // an arbitrary number added, e.g. by typecheck and inlining. If it doesn't
+ // fit the syntax, emit an enclosing block starting with the init statements.
+
+ // if we can just say "for" n->ninit; ... then do so
+ simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
+
+ // otherwise, print the inits as separate statements
+ complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr)
+
+ // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+ extrablock := complexinit && stmtwithinit(n.Op)
+
+ if extrablock {
+ fmt.Fprint(s, "{")
+ }
+
+ if complexinit {
+ mode.Fprintf(s, " %v; ", n.Ninit)
+ }
+
+ switch n.Op {
+ case ODCL:
+ mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
+
+ case ODCLFIELD:
+ if n.Sym != nil {
+ mode.Fprintf(s, "%v %v", n.Sym, n.Left)
+ } else {
+ mode.Fprintf(s, "%v", n.Left)
+ }
+
+ // Don't export "v = <N>" initializing statements, in the hope that they're
+ // always preceded by the DCL, which will be re-parsed and typechecked to
+ // reproduce the "v = <N>" again.
+ case OAS:
+ if n.Colas() && !complexinit {
+ mode.Fprintf(s, "%v := %v", n.Left, n.Right)
+ } else {
+ mode.Fprintf(s, "%v = %v", n.Left, n.Right)
+ }
+
+ case OASOP:
+ if n.Implicit() {
+ if n.SubOp() == OADD {
+ mode.Fprintf(s, "%v++", n.Left)
+ } else {
+ mode.Fprintf(s, "%v--", n.Left)
+ }
+ break
+ }
+
+ mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right)
+
+ case OAS2:
+ if n.Colas() && !complexinit {
+ mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
+ break
+ }
+ fallthrough
+
+ case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ mode.Fprintf(s, "%.v = %v", n.List, n.Right)
+
+ case ORETURN:
+ mode.Fprintf(s, "return %.v", n.List)
+
+ case ORETJMP:
+ mode.Fprintf(s, "retjmp %v", n.Sym)
+
+ case OINLMARK:
+ mode.Fprintf(s, "inlmark %d", n.Xoffset)
+
+ case OGO:
+ mode.Fprintf(s, "go %v", n.Left)
+
+ case ODEFER:
+ mode.Fprintf(s, "defer %v", n.Left)
+
+ case OIF:
+ if simpleinit {
+ mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
+ } else {
+ mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
+ }
+ if n.Rlist.Len() != 0 {
+ mode.Fprintf(s, " else { %v }", n.Rlist)
+ }
+
+ case OFOR, OFORUNTIL:
+ opname := "for"
+ if n.Op == OFORUNTIL {
+ opname = "foruntil"
+ }
+ if mode == FErr { // TODO maybe only if FmtShort, same below
+ fmt.Fprintf(s, "%s loop", opname)
+ break
+ }
+
+ fmt.Fprint(s, opname)
+ if simpleinit {
+ mode.Fprintf(s, " %v;", n.Ninit.First())
+ } else if n.Right != nil {
+ fmt.Fprint(s, " ;")
+ }
+
+ if n.Left != nil {
+ mode.Fprintf(s, " %v", n.Left)
+ }
+
+ if n.Right != nil {
+ mode.Fprintf(s, "; %v", n.Right)
+ } else if simpleinit {
+ fmt.Fprint(s, ";")
+ }
+
+ if n.Op == OFORUNTIL && n.List.Len() != 0 {
+ mode.Fprintf(s, "; %v", n.List)
+ }
+
+ mode.Fprintf(s, " { %v }", n.Nbody)
+
+ case ORANGE:
+ if mode == FErr {
+ fmt.Fprint(s, "for loop")
+ break
+ }
+
+ if n.List.Len() == 0 {
+ mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
+ break
+ }
+
+ mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
+
+ case OSELECT, OSWITCH:
+ if mode == FErr {
+ mode.Fprintf(s, "%v statement", n.Op)
+ break
+ }
+
+ mode.Fprintf(s, "%#v", n.Op)
+ if simpleinit {
+ mode.Fprintf(s, " %v;", n.Ninit.First())
+ }
+ if n.Left != nil {
+ mode.Fprintf(s, " %v ", n.Left)
+ }
+
+ mode.Fprintf(s, " { %v }", n.List)
+
+ case OCASE:
+ if n.List.Len() != 0 {
+ mode.Fprintf(s, "case %.v", n.List)
+ } else {
+ fmt.Fprint(s, "default")
+ }
+ mode.Fprintf(s, ": %v", n.Nbody)
+
+ case OBREAK, OCONTINUE, OGOTO, OFALL:
+ if n.Sym != nil {
+ mode.Fprintf(s, "%#v %v", n.Op, n.Sym)
+ } else {
+ mode.Fprintf(s, "%#v", n.Op)
+ }
+
+ case OEMPTY:
+ break
+
+ case OLABEL:
+ mode.Fprintf(s, "%v: ", n.Sym)
+ }
+
+ if extrablock {
+ fmt.Fprint(s, "}")
+ }
+}
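A hedged example of the three cases distinguished above, for an OIF node (illustrative source, not taken from the patch):

// one init that fits the syntax (simpleinit):
//   if x := f(); x > 0 { ... }
// several inits (complexinit, wrapped in an extra block because OIF is a stmtwithinit):
//   { x := f(); y := g(); if x > y { ... } }
// in FErr (user error) mode complexinit is never set, so extra inits are simply not printed.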
+
+var opprec = []int{
+ OALIGNOF: 8,
+ OAPPEND: 8,
+ OBYTES2STR: 8,
+ OARRAYLIT: 8,
+ OSLICELIT: 8,
+ ORUNES2STR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLOSE: 8,
+ OCONVIFACE: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OGETG: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKESLICECOPY: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONONAME: 8,
+ OOFFSETOF: 8,
+ OPACK: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSIZEOF: 8,
+ OSTR2BYTES: 8,
+ OSTR2RUNES: 8,
+ OSTRUCTLIT: 8,
+ OTARRAY: 8,
+ OTCHAN: 8,
+ OTFUNC: 8,
+ OTINTER: 8,
+ OTMAP: 8,
+ OTSTRUCT: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ OSLICEHEADER: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OCALLPART: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OBITNOT: 7,
+ ONEG: 7,
+ OADDR: 7,
+ ODEREF: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+
+ // Statements handled by stmtfmt
+ OAS: -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODCLFIELD: -1,
+ ODEFER: -1,
+ OEMPTY: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OFORUNTIL: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OGO: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+
+ OEND: 0,
+}
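To make the precedence table concrete, a hedged walk-through of how exprfmt (just below) consults it when printing a product whose left operand is a sum:

// Printing n = OMUL(OADD(a, b), c) starting at prec 0:
//   opprec[OMUL] = 6, and 0 > 6 is false, so no outer parens.
//   The left operand is printed with prec 6: opprec[OADD] = 5 < 6, so it is wrapped -> "(a + b)".
//   The right operand is printed with prec 6+1 = 7: ONAME has prec 8, so it prints bare -> "c".
// Result: "(a + b) * c".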
+
+func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
+ for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) {
+ n = n.Left
+ }
+
+ if n == nil {
+ fmt.Fprint(s, "<N>")
+ return
+ }
+
+ nprec := opprec[n.Op]
+ if n.Op == OTYPE && n.Sym != nil {
+ nprec = 8
+ }
+
+ if prec > nprec {
+ mode.Fprintf(s, "(%v)", n)
+ return
+ }
+
+ switch n.Op {
+ case OPAREN:
+ mode.Fprintf(s, "(%v)", n.Left)
+
+ case OLITERAL: // this is a bit of a mess
+ if mode == FErr {
+ if n.Orig != nil && n.Orig != n {
+ n.Orig.exprfmt(s, prec, mode)
+ return
+ }
+ if n.Sym != nil {
+ fmt.Fprint(s, smodeString(n.Sym, mode))
+ return
+ }
+ }
+ if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
+ n.Orig.exprfmt(s, prec, mode)
+ return
+ }
+ if n.Type != nil && !n.Type.IsUntyped() {
+ // Need parens when type begins with what might
+ // be misinterpreted as a unary operator: * or <-.
+ if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
+ mode.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
+ return
+ } else {
+ mode.Fprintf(s, "%v(%v)", n.Type, n.Val())
+ return
+ }
+ }
+
+ mode.Fprintf(s, "%v", n.Val())
+
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ case ONAME:
+ if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ fmt.Fprint(s, "_")
+ return
+ }
+ fallthrough
+ case OPACK, ONONAME:
+ fmt.Fprint(s, smodeString(n.Sym, mode))
+
+ case OTYPE:
+ if n.Type == nil && n.Sym != nil {
+ fmt.Fprint(s, smodeString(n.Sym, mode))
+ return
+ }
+ mode.Fprintf(s, "%v", n.Type)
+
+ case OTARRAY:
+ if n.Left != nil {
+ mode.Fprintf(s, "[%v]%v", n.Left, n.Right)
+ return
+ }
+ mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck
+
+ case OTMAP:
+ mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
+
+ case OTCHAN:
+ switch n.TChanDir() {
+ case types.Crecv:
+ mode.Fprintf(s, "<-chan %v", n.Left)
+
+ case types.Csend:
+ mode.Fprintf(s, "chan<- %v", n.Left)
+
+ default:
+ if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv {
+ mode.Fprintf(s, "chan (%v)", n.Left)
+ } else {
+ mode.Fprintf(s, "chan %v", n.Left)
+ }
+ }
+
+ case OTSTRUCT:
+ fmt.Fprint(s, "<struct>")
+
+ case OTINTER:
+ fmt.Fprint(s, "<inter>")
+
+ case OTFUNC:
+ fmt.Fprint(s, "<func>")
+
+ case OCLOSURE:
+ if mode == FErr {
+ fmt.Fprint(s, "func literal")
+ return
+ }
+ if n.Nbody.Len() != 0 {
+ mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
+ return
+ }
+ mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
+
+ case OCOMPLIT:
+ if mode == FErr {
+ if n.Implicit() {
+ mode.Fprintf(s, "... argument")
+ return
+ }
+ if n.Right != nil {
+ mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
+ return
+ }
+
+ fmt.Fprint(s, "composite literal")
+ return
+ }
+ mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
+
+ case OPTRLIT:
+ mode.Fprintf(s, "&%v", n.Left)
+
+ case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+ if mode == FErr {
+ mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
+ return
+ }
+ mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
+
+ case OKEY:
+ if n.Left != nil && n.Right != nil {
+ mode.Fprintf(s, "%v:%v", n.Left, n.Right)
+ return
+ }
+
+ if n.Left == nil && n.Right != nil {
+ mode.Fprintf(s, ":%v", n.Right)
+ return
+ }
+ if n.Left != nil && n.Right == nil {
+ mode.Fprintf(s, "%v:", n.Left)
+ return
+ }
+ fmt.Fprint(s, ":")
+
+ case OSTRUCTKEY:
+ mode.Fprintf(s, "%v:%v", n.Sym, n.Left)
+
+ case OCALLPART:
+ n.Left.exprfmt(s, nprec, mode)
+ if n.Right == nil || n.Right.Sym == nil {
+ fmt.Fprint(s, ".<nil>")
+ return
+ }
+ mode.Fprintf(s, ".%0S", n.Right.Sym)
+
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
+ n.Left.exprfmt(s, nprec, mode)
+ if n.Sym == nil {
+ fmt.Fprint(s, ".<nil>")
+ return
+ }
+ mode.Fprintf(s, ".%0S", n.Sym)
+
+ case ODOTTYPE, ODOTTYPE2:
+ n.Left.exprfmt(s, nprec, mode)
+ if n.Right != nil {
+ mode.Fprintf(s, ".(%v)", n.Right)
+ return
+ }
+ mode.Fprintf(s, ".(%v)", n.Type)
+
+ case OINDEX, OINDEXMAP:
+ n.Left.exprfmt(s, nprec, mode)
+ mode.Fprintf(s, "[%v]", n.Right)
+
+ case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ n.Left.exprfmt(s, nprec, mode)
+ fmt.Fprint(s, "[")
+ low, high, max := n.SliceBounds()
+ if low != nil {
+ fmt.Fprint(s, low.modeString(mode))
+ }
+ fmt.Fprint(s, ":")
+ if high != nil {
+ fmt.Fprint(s, high.modeString(mode))
+ }
+ if n.Op.IsSlice3() {
+ fmt.Fprint(s, ":")
+ if max != nil {
+ fmt.Fprint(s, max.modeString(mode))
+ }
+ }
+ fmt.Fprint(s, "]")
+
+ case OSLICEHEADER:
+ if n.List.Len() != 2 {
+ Fatalf("bad OSLICEHEADER list length %d", n.List.Len())
+ }
+ mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second())
+
+ case OCOMPLEX, OCOPY:
+ if n.Left != nil {
+ mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
+ } else {
+ mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+ }
+
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ OBYTES2STR,
+ ORUNES2STR,
+ OSTR2BYTES,
+ OSTR2RUNES,
+ ORUNESTR:
+ if n.Type == nil || n.Type.Sym == nil {
+ mode.Fprintf(s, "(%v)", n.Type)
+ } else {
+ mode.Fprintf(s, "%v", n.Type)
+ }
+ if n.Left != nil {
+ mode.Fprintf(s, "(%v)", n.Left)
+ } else {
+ mode.Fprintf(s, "(%.v)", n.List)
+ }
+
+ case OREAL,
+ OIMAG,
+ OAPPEND,
+ OCAP,
+ OCLOSE,
+ ODELETE,
+ OLEN,
+ OMAKE,
+ ONEW,
+ OPANIC,
+ ORECOVER,
+ OALIGNOF,
+ OOFFSETOF,
+ OSIZEOF,
+ OPRINT,
+ OPRINTN:
+ if n.Left != nil {
+ mode.Fprintf(s, "%#v(%v)", n.Op, n.Left)
+ return
+ }
+ if n.IsDDD() {
+ mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
+ return
+ }
+ mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+
+ case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
+ n.Left.exprfmt(s, nprec, mode)
+ if n.IsDDD() {
+ mode.Fprintf(s, "(%.v...)", n.List)
+ return
+ }
+ mode.Fprintf(s, "(%.v)", n.List)
+
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ if n.List.Len() != 0 { // pre-typecheck
+ mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
+ return
+ }
+ if n.Right != nil {
+ mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
+ return
+ }
+ if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
+ mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
+ return
+ }
+ mode.Fprintf(s, "make(%v)", n.Type)
+
+ case OMAKESLICECOPY:
+ mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right)
+
+ case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
+ // Unary
+ mode.Fprintf(s, "%#v", n.Op)
+ if n.Left != nil && n.Left.Op == n.Op {
+ fmt.Fprint(s, " ")
+ }
+ n.Left.exprfmt(s, nprec+1, mode)
+
+ // Binary
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ ORSH,
+ OSEND,
+ OSUB,
+ OXOR:
+ n.Left.exprfmt(s, nprec, mode)
+ mode.Fprintf(s, " %#v ", n.Op)
+ n.Right.exprfmt(s, nprec+1, mode)
+
+ case OADDSTR:
+ for i, n1 := range n.List.Slice() {
+ if i != 0 {
+ fmt.Fprint(s, " + ")
+ }
+ n1.exprfmt(s, nprec, mode)
+ }
+ case ODDD:
+ mode.Fprintf(s, "...")
+ default:
+ mode.Fprintf(s, "<node %v>", n.Op)
+ }
+}
+
+func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
+ t := n.Type
+
+ // We almost always want the original.
+ // TODO(gri) Why the special case for OLITERAL?
+ if n.Op != OLITERAL && n.Orig != nil {
+ n = n.Orig
+ }
+
+ if flag&FmtLong != 0 && t != nil {
+ if t.Etype == TNIL {
+ fmt.Fprint(s, "nil")
+ } else if n.Op == ONAME && n.Name.AutoTemp() {
+ mode.Fprintf(s, "%v value", t)
+ } else {
+ mode.Fprintf(s, "%v (type %v)", n, t)
+ }
+ return
+ }
+
+ // TODO inlining produces expressions with ninits. we can't print these yet.
+
+ if opprec[n.Op] < 0 {
+ n.stmtfmt(s, mode)
+ return
+ }
+
+ n.exprfmt(s, 0, mode)
+}
+
+func (n *Node) nodedump(s fmt.State, flag FmtFlag, mode fmtMode) {
+ recur := flag&FmtShort == 0
+
+ if recur {
+ indent(s)
+ if dumpdepth > 40 {
+ fmt.Fprint(s, "...")
+ return
+ }
+
+ if n.Ninit.Len() != 0 {
+ mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
+ indent(s)
+ }
+ }
+
+ switch n.Op {
+ default:
+ mode.Fprintf(s, "%v%j", n.Op, n)
+
+ case OLITERAL:
+ mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
+
+ case ONAME, ONONAME:
+ if n.Sym != nil {
+ mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
+ } else {
+ mode.Fprintf(s, "%v%j", n.Op, n)
+ }
+ if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ indent(s)
+ mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ }
+
+ case OASOP:
+ mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n)
+
+ case OTYPE:
+ mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
+ if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ indent(s)
+ mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ }
+ }
+
+ if n.Op == OCLOSURE && n.Func.Closure != nil && n.Func.Closure.Func.Nname.Sym != nil {
+ mode.Fprintf(s, " fnName %v", n.Func.Closure.Func.Nname.Sym)
+ }
+ if n.Sym != nil && n.Op != ONAME {
+ mode.Fprintf(s, " %v", n.Sym)
+ }
+
+ if n.Type != nil {
+ mode.Fprintf(s, " %v", n.Type)
+ }
+
+ if recur {
+ if n.Left != nil {
+ mode.Fprintf(s, "%v", n.Left)
+ }
+ if n.Right != nil {
+ mode.Fprintf(s, "%v", n.Right)
+ }
+ if n.Func != nil && n.Func.Closure != nil && n.Func.Closure.Nbody.Len() != 0 {
+ indent(s)
+ // The function associated with a closure
+ mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Closure)
+ }
+ if n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
+ indent(s)
+ // The dcls for a func or closure
+ mode.Fprintf(s, "%v-dcl%v", n.Op, asNodes(n.Func.Dcl))
+ }
+ if n.List.Len() != 0 {
+ indent(s)
+ mode.Fprintf(s, "%v-list%v", n.Op, n.List)
+ }
+
+ if n.Rlist.Len() != 0 {
+ indent(s)
+ mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
+ }
+
+ if n.Nbody.Len() != 0 {
+ indent(s)
+ mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
+ }
+ }
+}
+
+// "%S" suppresses qualifying with package
+func symFormat(s *types.Sym, f fmt.State, verb rune, mode fmtMode) {
+ switch verb {
+ case 'v', 'S':
+ fmt.Fprint(f, sconv(s, fmtFlag(f, verb), mode))
+
+ default:
+ fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
+ }
+}
+
+func smodeString(s *types.Sym, mode fmtMode) string { return sconv(s, 0, mode) }
+
+// See #16897 before changing the implementation of sconv.
+func sconv(s *types.Sym, flag FmtFlag, mode fmtMode) string {
+ if flag&FmtLong != 0 {
+ panic("linksymfmt")
+ }
+
+ if s == nil {
+ return "<S>"
+ }
+
+ if s.Name == "_" {
+ return "_"
+ }
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ flag, mode = flag.update(mode)
+ symfmt(buf, s, flag, mode)
+ return types.InternString(buf.Bytes())
+}
+
+func sconv2(b *bytes.Buffer, s *types.Sym, flag FmtFlag, mode fmtMode) {
+ if flag&FmtLong != 0 {
+ panic("linksymfmt")
+ }
+ if s == nil {
+ b.WriteString("<S>")
+ return
+ }
+ if s.Name == "_" {
+ b.WriteString("_")
+ return
+ }
+
+ flag, mode = flag.update(mode)
+ symfmt(b, s, flag, mode)
+}
+
+func fldconv(b *bytes.Buffer, f *types.Field, flag FmtFlag, mode fmtMode, visited map[*types.Type]int, funarg types.Funarg) {
+ if f == nil {
+ b.WriteString("<T>")
+ return
+ }
+ flag, mode = flag.update(mode)
+ if mode == FTypeIdName {
+ flag |= FmtUnsigned
+ }
+
+ var name string
+ if flag&FmtShort == 0 {
+ s := f.Sym
+
+ // Take the name from the original.
+ if mode == FErr {
+ s = origSym(s)
+ }
+
+ if s != nil && f.Embedded == 0 {
+ if funarg != types.FunargNone {
+ name = asNode(f.Nname).modeString(mode)
+ } else if flag&FmtLong != 0 {
+ name = mode.Sprintf("%0S", s)
+ if !types.IsExported(name) && flag&FmtUnsigned == 0 {
+ name = smodeString(s, mode) // qualify non-exported names (used on structs, not on funarg)
+ }
+ } else {
+ name = smodeString(s, mode)
+ }
+ }
+ }
+
+ if name != "" {
+ b.WriteString(name)
+ b.WriteString(" ")
+ }
+
+ if f.IsDDD() {
+ var et *types.Type
+ if f.Type != nil {
+ et = f.Type.Elem()
+ }
+ b.WriteString("...")
+ tconv2(b, et, 0, mode, visited)
+ } else {
+ tconv2(b, f.Type, 0, mode, visited)
+ }
+
+ if flag&FmtShort == 0 && funarg == types.FunargNone && f.Note != "" {
+ b.WriteString(" ")
+ b.WriteString(strconv.Quote(f.Note))
+ }
+}
+
+// "%L" print definition, not name
+// "%S" omit 'func' and receiver from function types, short type names
+func typeFormat(t *types.Type, s fmt.State, verb rune, mode fmtMode) {
+ switch verb {
+ case 'v', 'S', 'L':
+ fmt.Fprint(s, tconv(t, fmtFlag(s, verb), mode))
+ default:
+ fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+ }
+}
+
+func (n *Node) String() string { return fmt.Sprint(n) }
+func (n *Node) modeString(mode fmtMode) string { return mode.Sprint(n) }
+
+// "%L" suffix with "(type %T)" where possible
+// "%+S" in debug mode, don't recurse, no multiline output
+func (n *Node) nconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+ if n == nil {
+ fmt.Fprint(s, "<N>")
+ return
+ }
+
+ flag, mode = flag.update(mode)
+
+ switch mode {
+ case FErr:
+ n.nodefmt(s, flag, mode)
+
+ case FDbg:
+ dumpdepth++
+ n.nodedump(s, flag, mode)
+ dumpdepth--
+
+ default:
+ Fatalf("unhandled %%N mode: %d", mode)
+ }
+}
+
+func (l Nodes) format(s fmt.State, verb rune, mode fmtMode) {
+ switch verb {
+ case 'v':
+ l.hconv(s, fmtFlag(s, verb), mode)
+
+ default:
+ fmt.Fprintf(s, "%%!%c(Nodes)", verb)
+ }
+}
+
+func (n Nodes) String() string {
+ return fmt.Sprint(n)
+}
+
+// Flags: all those of %N plus '.': separate with comma's instead of semicolons.
+func (l Nodes) hconv(s fmt.State, flag FmtFlag, mode fmtMode) {
+ if l.Len() == 0 && mode == FDbg {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ flag, mode = flag.update(mode)
+ sep := "; "
+ if mode == FDbg {
+ sep = "\n"
+ } else if flag&FmtComma != 0 {
+ sep = ", "
+ }
+
+ for i, n := range l.Slice() {
+ fmt.Fprint(s, n.modeString(mode))
+ if i+1 < l.Len() {
+ fmt.Fprint(s, sep)
+ }
+ }
+}
+
+func dumplist(s string, l Nodes) {
+ fmt.Printf("%s%+v\n", s, l)
+}
+
+func fdumplist(w io.Writer, s string, l Nodes) {
+ fmt.Fprintf(w, "%s%+v\n", s, l)
+}
+
+func Dump(s string, n *Node) {
+ fmt.Printf("%s [%p]%+v\n", s, n, n)
+}
+
+// TODO(gri) make variable local somehow
+var dumpdepth int
+
+// indent prints indentation to s.
+func indent(s fmt.State) {
+ fmt.Fprint(s, "\n")
+ for i := 0; i < dumpdepth; i++ {
+ fmt.Fprint(s, ". ")
+ }
+}
+
+func ellipsisIf(b bool) string {
+ if b {
+ return "..."
+ }
+ return ""
+}
diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go
new file mode 100644
index 0000000..929653e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/gen.go
@@ -0,0 +1,86 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "strconv"
+)
+
+// sysfunc looks up a Go function by name in package runtime. The returned
+// function must follow the internal calling convention.
+func sysfunc(name string) *obj.LSym {
+ s := Runtimepkg.Lookup(name)
+ s.SetFunc(true)
+ return s.Linksym()
+}
+
+// sysvar looks up a variable (or assembly function) name in package
+// runtime. If this is a function, it may have a special calling
+// convention.
+func sysvar(name string) *obj.LSym {
+ return Runtimepkg.Lookup(name).Linksym()
+}
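For context, a hedged illustration of how these helpers are typically used to wire up the runtime symbols declared in go.go; the actual call sites are in other files of this change:

// Illustrative assignments only.
Deferreturn = sysfunc("deferreturn")  // ordinary runtime function, internal calling convention
writeBarrier = sysvar("writeBarrier") // runtime variable, read directly by generated code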
+
+// isParamStackCopy reports whether this is the on-stack copy of a
+// function parameter that moved to the heap.
+func (n *Node) isParamStackCopy() bool {
+ return n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Name.Param.Heapaddr != nil
+}
+
+// isParamHeapCopy reports whether this is the on-heap copy of
+// a function parameter that moved to the heap.
+func (n *Node) isParamHeapCopy() bool {
+ return n.Op == ONAME && n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy != nil
+}
+
+// autotmpname returns the name for an autotmp variable numbered n.
+func autotmpname(n int) string {
+ // Give each tmp a different name so that they can be registerized.
+ // Add a preceding . to avoid clashing with legal names.
+ const prefix = ".autotmp_"
+ // Start with a buffer big enough to hold a large n.
+ b := []byte(prefix + " ")[:len(prefix)]
+ b = strconv.AppendInt(b, int64(n), 10)
+ return types.InternString(b)
+}
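A minimal standalone sketch of the formatting trick above (the real function also interns the result):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const prefix = ".autotmp_"
	b := []byte(prefix + " ")[:len(prefix)] // same trick as above: spare capacity for the digits
	b = strconv.AppendInt(b, 11, 10)
	fmt.Println(string(b)) // prints ".autotmp_11"
}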
+
+// make a new Node off the books
+func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node {
+ if curfn == nil {
+ Fatalf("no curfn for tempAt")
+ }
+ if curfn.Func.Closure != nil && curfn.Op == OCLOSURE {
+ Dump("tempAt", curfn)
+ Fatalf("adding tempAt to wrong closure function")
+ }
+ if t == nil {
+ Fatalf("tempAt called with nil type")
+ }
+
+ s := &types.Sym{
+ Name: autotmpname(len(curfn.Func.Dcl)),
+ Pkg: localpkg,
+ }
+ n := newnamel(pos, s)
+ s.Def = asTypesNode(n)
+ n.Type = t
+ n.SetClass(PAUTO)
+ n.Esc = EscNever
+ n.Name.Curfn = curfn
+ n.Name.SetUsed(true)
+ n.Name.SetAutoTemp(true)
+ curfn.Func.Dcl = append(curfn.Func.Dcl, n)
+
+ dowidth(t)
+
+ return n.Orig
+}
+
+func temp(t *types.Type) *Node {
+ return tempAt(lineno, Curfn, t)
+}
diff --git a/src/cmd/compile/internal/gc/global_test.go b/src/cmd/compile/internal/gc/global_test.go
new file mode 100644
index 0000000..edad6d0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/global_test.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Make sure "hello world" does not link in all the
+// fmt.scanf routines. See issue 6853.
+func TestScanfRemoval(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir, err := ioutil.TempDir("", "issue6853a-")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Name of destination.
+ dst := filepath.Join(dir, "test")
+
+ // Compile source.
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dst, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v", err)
+ }
+
+ // Check destination to see if scanf code was included.
+ cmd = exec.Command(testenv.GoToolPath(t), "tool", "nm", dst)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not read target: %v", err)
+ }
+ if bytes.Contains(out, []byte("scanInt")) {
+ t.Fatalf("scanf code not removed from helloworld")
+ }
+}
+
+// Make sure -S prints assembly code. See issue 14515.
+func TestDashS(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir, err := ioutil.TempDir("", "issue14515-")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Compile source.
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v", err)
+ }
+
+ patterns := []string{
+ // It is hard to look for actual instructions in an
+ // arch-independent way. So we'll just look for
+ // pseudo-ops that are arch-independent.
+ "\tTEXT\t",
+ "\tFUNCDATA\t",
+ "\tPCDATA\t",
+ }
+ outstr := string(out)
+ for _, p := range patterns {
+ if !strings.Contains(outstr, p) {
+ println(outstr)
+ panic("can't find pattern " + p)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go
new file mode 100644
index 0000000..274930b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/go.go
@@ -0,0 +1,349 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "sync"
+)
+
+const (
+ BADWIDTH = types.BADWIDTH
+)
+
+var (
+ // maximum size of a variable which we will allocate on the stack.
+ // This limit is for explicit variable declarations like "var x T" or "x := ...".
+ // Note: the flag smallframes can update this value.
+ maxStackVarSize = int64(10 * 1024 * 1024)
+
+ // maximum size of implicit variables that we will allocate on the stack.
+ // p := new(T) allocating T on the stack
+ // p := &T{} allocating T on the stack
+ // s := make([]T, n) allocating [n]T on the stack
+ // s := []byte("...") allocating [n]byte on the stack
+ // Note: the flag smallframes can update this value.
+ maxImplicitStackVarSize = int64(64 * 1024)
+
+ // smallArrayBytes is the maximum size of an array which is considered small.
+ // Small arrays will be initialized directly with a sequence of constant stores.
+ // Large arrays will be initialized by copying from a static temp.
+ // 256 bytes was chosen to minimize generated code + statictmp size.
+ smallArrayBytes = int64(256)
+)
+
+// isRuntimePkg reports whether p is package runtime.
+func isRuntimePkg(p *types.Pkg) bool {
+ if compiling_runtime && p == localpkg {
+ return true
+ }
+ return p.Path == "runtime"
+}
+
+// isReflectPkg reports whether p is package reflect.
+func isReflectPkg(p *types.Pkg) bool {
+ if p == localpkg {
+ return myimportpath == "reflect"
+ }
+ return p.Path == "reflect"
+}
+
+// The Class of a variable/function describes the "storage class"
+// of a variable or function. During parsing, storage classes are
+// called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class
+const (
+ Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+ PEXTERN // global variables
+ PAUTO // local variables
+ PAUTOHEAP // local variables or parameters moved to heap
+ PPARAM // input arguments
+ PPARAMOUT // output results
+ PFUNC // global functions
+
+ // Careful: Class is stored in three bits in Node.flags.
+ _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
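The final blank assignment is a compile-time guard: converting a negative untyped constant to uint is illegal, so the build breaks if the class count ever exceeds the three bits reserved in Node.flags. The same idiom in isolation (hypothetical names):

const (
	red = iota
	green
	blue
	// If enough constants were added for iota to exceed 8 here, (1<<3)-iota
	// would be negative and the uint conversion would not compile.
	_ = uint((1 << 3) - iota) // static assert: at most 8 values
)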
+
+// Slices in the runtime are represented by three components:
+//
+// type slice struct {
+// ptr unsafe.Pointer
+// len int
+// cap int
+// }
+//
+// Strings in the runtime are represented by two components:
+//
+// type string struct {
+// ptr unsafe.Pointer
+// len int
+// }
+//
+// These variables are the offsets of fields and sizes of these structs.
+var (
+ slicePtrOffset int64
+ sliceLenOffset int64
+ sliceCapOffset int64
+
+ sizeofSlice int64
+ sizeofString int64
+)
+
+var pragcgobuf [][]string
+
+var outfile string
+var linkobj string
+
+// nerrors is the number of compiler errors reported
+// since the last call to saveerrors.
+var nerrors int
+
+// nsavederrors is the total number of compiler errors
+// reported before the last call to saveerrors.
+var nsavederrors int
+
+var nsyntaxerrors int
+
+var decldepth int32
+
+var nolocalimports bool
+
+// gc debug flags
+type DebugFlags struct {
+ P, B, C, E,
+ K, L, N, S,
+ W, e, h, j,
+ l, m, r, w int
+}
+
+var Debug DebugFlags
+
+var debugstr string
+
+var Debug_checknil int
+var Debug_typeassert int
+
+var localpkg *types.Pkg // package being compiled
+
+var inimport bool // set during import
+
+var itabpkg *types.Pkg // fake pkg for itab entries
+
+var itablinkpkg *types.Pkg // fake package for runtime itab entries
+
+var Runtimepkg *types.Pkg // fake package runtime
+
+var racepkg *types.Pkg // package runtime/race
+
+var msanpkg *types.Pkg // package runtime/msan
+
+var unsafepkg *types.Pkg // package unsafe
+
+var trackpkg *types.Pkg // fake package for field tracking
+
+var mappkg *types.Pkg // fake package for map zero value
+
+var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
+
+var zerosize int64
+
+var myimportpath string
+
+var localimport string
+
+var asmhdr string
+
+var simtype [NTYPE]types.EType
+
+var (
+ isInt [NTYPE]bool
+ isFloat [NTYPE]bool
+ isComplex [NTYPE]bool
+ issimple [NTYPE]bool
+)
+
+var (
+ okforeq [NTYPE]bool
+ okforadd [NTYPE]bool
+ okforand [NTYPE]bool
+ okfornone [NTYPE]bool
+ okforcmp [NTYPE]bool
+ okforbool [NTYPE]bool
+ okforcap [NTYPE]bool
+ okforlen [NTYPE]bool
+ okforarith [NTYPE]bool
+ okforconst [NTYPE]bool
+)
+
+var (
+ okfor [OEND][]bool
+ iscmp [OEND]bool
+)
+
+var minintval [NTYPE]*Mpint
+
+var maxintval [NTYPE]*Mpint
+
+var minfltval [NTYPE]*Mpflt
+
+var maxfltval [NTYPE]*Mpflt
+
+var xtop []*Node
+
+var exportlist []*Node
+
+var importlist []*Node // imported functions and methods with inlinable bodies
+
+var (
+ funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
+ funcsyms []*types.Sym
+)
+
+var dclcontext Class // PEXTERN/PAUTO
+
+var Curfn *Node
+
+var Widthptr int
+
+var Widthreg int
+
+var nblank *Node
+
+var typecheckok bool
+
+var compiling_runtime bool
+
+// Compiling the standard library
+var compiling_std bool
+
+var use_writebarrier bool
+
+var pure_go bool
+
+var flag_installsuffix string
+
+var flag_race bool
+
+var flag_msan bool
+
+var flagDWARF bool
+
+// Whether we are adding any sort of code instrumentation, such as
+// when the race detector is enabled.
+var instrumenting bool
+
+// Whether we are tracking lexical scopes for DWARF.
+var trackScopes bool
+
+// Controls generation of DWARF inlined instance records. Zero
+// disables, 1 emits inlined routines but suppresses var info,
+// and 2 emits inlined routines with tracking of formals/locals.
+var genDwarfInline int
+
+var debuglive int
+
+var Ctxt *obj.Link
+
+var writearchive bool
+
+var nodfp *Node
+
+var disable_checknil int
+
+var autogeneratedPos src.XPos
+
+// interface to back end
+
+type Arch struct {
+ LinkArch *obj.LinkArch
+
+ REGSP int
+ MAXWIDTH int64
+ SoftFloat bool
+
+ PadFrame func(int64) int64
+
+ // ZeroRange zeroes a range of memory on the stack. It is only inserted
+ // at function entry, and it is ok to clobber registers.
+ ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+
+ Ginsnop func(*Progs) *obj.Prog
+ Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
+
+ // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+ SSAMarkMoves func(*SSAGenState, *ssa.Block)
+
+ // SSAGenValue emits Prog(s) for the Value.
+ SSAGenValue func(*SSAGenState, *ssa.Value)
+
+ // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
+ // for all values in the block before SSAGenBlock.
+ SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
+}
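A hedged sketch of how an architecture backend might populate this interface at startup; every value below is a placeholder, and SSAGenState is defined elsewhere in the package (this is not the actual amd64/arm/arm64 code):

func archInit(arch *Arch) {
	arch.REGSP = 0          // placeholder: the architecture's stack-pointer register number
	arch.MAXWIDTH = 1 << 50 // placeholder: largest addressable object size
	arch.ZeroRange = func(pp *Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
		return p // placeholder: emit stores or a duffzero call to clear [off, off+cnt)
	}
	arch.Ginsnop = func(pp *Progs) *obj.Prog {
		return pp.NewProg() // placeholder: a real backend emits an actual no-op instruction
	}
	arch.SSAMarkMoves = func(s *SSAGenState, b *ssa.Block) {}
	arch.SSAGenValue = func(s *SSAGenState, v *ssa.Value) {}
	arch.SSAGenBlock = func(s *SSAGenState, b, next *ssa.Block) {}
}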
+
+var thearch Arch
+
+var (
+ staticuint64s,
+ zerobase *Node
+
+ assertE2I,
+ assertE2I2,
+ assertI2I,
+ assertI2I2,
+ deferproc,
+ deferprocStack,
+ Deferreturn,
+ Duffcopy,
+ Duffzero,
+ gcWriteBarrier,
+ goschedguarded,
+ growslice,
+ msanread,
+ msanwrite,
+ msanmove,
+ newobject,
+ newproc,
+ panicdivide,
+ panicshift,
+ panicdottypeE,
+ panicdottypeI,
+ panicnildottype,
+ panicoverflow,
+ raceread,
+ racereadrange,
+ racewrite,
+ racewriterange,
+ x86HasPOPCNT,
+ x86HasSSE41,
+ x86HasFMA,
+ armHasVFPv4,
+ arm64HasATOMICS,
+ typedmemclr,
+ typedmemmove,
+ Udiv,
+ writeBarrier,
+ zerobaseSym *obj.LSym
+
+ BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
+ ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
+
+ // Wasm
+ WasmMove,
+ WasmZero,
+ WasmDiv,
+ WasmTruncS,
+ WasmTruncU,
+ SigPanic *obj.LSym
+)
+
+// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
+var GCWriteBarrierReg map[int16]*obj.LSym
diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go
new file mode 100644
index 0000000..d599a38
--- /dev/null
+++ b/src/cmd/compile/internal/gc/gsubr.go
@@ -0,0 +1,333 @@
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+ Text *obj.Prog // ATEXT Prog for this function
+ next *obj.Prog // next Prog
+ pc int64 // virtual PC; count of Progs
+ pos src.XPos // position to use for new Progs
+ curfn *Node // fn these Progs are for
+ progcache []obj.Prog // local progcache
+ cacheidx int // first free element of progcache
+
+ nextLive LivenessIndex // liveness index for the next Prog
+ prevLive LivenessIndex // last emitted liveness index
+}
+
+// newProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func newProgs(fn *Node, worker int) *Progs {
+ pp := new(Progs)
+ if Ctxt.CanReuseProgs() {
+ sz := len(sharedProgArray) / nBackendWorkers
+ pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
+ }
+ pp.curfn = fn
+
+ // prime the pump
+ pp.next = pp.NewProg()
+ pp.clearp(pp.next)
+
+ pp.pos = fn.Pos
+ pp.settext(fn)
+ // PCDATA tables implicitly start with index -1.
+ pp.prevLive = LivenessIndex{-1, false}
+ pp.nextLive = pp.prevLive
+ return pp
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
+ if pp.cacheidx < len(pp.progcache) {
+ p = &pp.progcache[pp.cacheidx]
+ pp.cacheidx++
+ } else {
+ p = new(obj.Prog)
+ }
+ p.Ctxt = Ctxt
+ return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+ plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
+ obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+ if Ctxt.CanReuseProgs() {
+ // Clear progs to enable GC and avoid abuse.
+ s := pp.progcache[:pp.cacheidx]
+ for i := range s {
+ s[i] = obj.Prog{}
+ }
+ }
+ // Clear pp to avoid abuse.
+ *pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+ if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
+ // Emit stack map index change.
+ idx := pp.nextLive.stackMapIndex
+ pp.prevLive.stackMapIndex = idx
+ p := pp.Prog(obj.APCDATA)
+ Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
+ Addrconst(&p.To, int64(idx))
+ }
+ if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
+ // Emit unsafe-point marker.
+ pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
+ p := pp.Prog(obj.APCDATA)
+ Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
+ if pp.nextLive.isUnsafePoint {
+ Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
+ } else {
+ Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
+ }
+ }
+
+ p := pp.next
+ pp.next = pp.NewProg()
+ pp.clearp(pp.next)
+ p.Link = pp.next
+
+ if !pp.pos.IsKnown() && Debug.K != 0 {
+ Warn("prog: unknown position (line 0)")
+ }
+
+ p.As = as
+ p.Pos = pp.pos
+ if pp.pos.IsStmt() == src.PosIsStmt {
+ // Clear IsStmt for later Progs at this pos, provided that as can be marked as a stmt.
+ if ssa.LosesStmtMark(as) {
+ return p
+ }
+ pp.pos = pp.pos.WithNotStmt()
+ }
+ return p
+}
+
+func (pp *Progs) clearp(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = pp.pc
+ pp.pc++
+}
+
+func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+ q := pp.NewProg()
+ pp.clearp(q)
+ q.As = as
+ q.Pos = p.Pos
+ q.From.Type = ftype
+ q.From.Reg = freg
+ q.From.Offset = foffset
+ q.To.Type = ttype
+ q.To.Reg = treg
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func (pp *Progs) settext(fn *Node) {
+ if pp.Text != nil {
+ Fatalf("Progs.settext called twice")
+ }
+ ptxt := pp.Prog(obj.ATEXT)
+ pp.Text = ptxt
+
+ fn.Func.lsym.Func().Text = ptxt
+ ptxt.From.Type = obj.TYPE_MEM
+ ptxt.From.Name = obj.NAME_EXTERN
+ ptxt.From.Sym = fn.Func.lsym
+}
+
+// initLSym defines f's obj.LSym and initializes it based on the
+// properties of f. This includes setting the symbol flags and ABI and
+// creating and initializing related DWARF symbols.
+//
+// initLSym must be called exactly once per function and must be
+// called for both functions with bodies and functions without bodies.
+func (f *Func) initLSym(hasBody bool) {
+ if f.lsym != nil {
+ Fatalf("Func.initLSym called twice")
+ }
+
+ if nam := f.Nname; !nam.isBlank() {
+ f.lsym = nam.Sym.Linksym()
+ if f.Pragma&Systemstack != 0 {
+ f.lsym.Set(obj.AttrCFunc, true)
+ }
+
+ var aliasABI obj.ABI
+ needABIAlias := false
+ defABI, hasDefABI := symabiDefs[f.lsym.Name]
+ if hasDefABI && defABI == obj.ABI0 {
+ // Symbol is defined as ABI0. Create an
+ // Internal -> ABI0 wrapper.
+ f.lsym.SetABI(obj.ABI0)
+ needABIAlias, aliasABI = true, obj.ABIInternal
+ } else {
+ // No ABI override. Check that the symbol is
+ // using the expected ABI.
+ want := obj.ABIInternal
+ if f.lsym.ABI() != want {
+ Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.lsym.Name, f.lsym.ABI(), want)
+ }
+ }
+
+ isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
+ if abi, ok := symabiRefs[f.lsym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
+ // Either 1) this symbol is definitely
+ // referenced as ABI0 from this package; or 2)
+ // this symbol is defined in this package but
+ // given a linkname, indicating that it may be
+ // referenced from another package. Create an
+ // ABI0 -> Internal wrapper so it can be
+ // called as ABI0. In case 2, it's important
+ // that we know it's defined in this package
+ // since other packages may "pull" symbols
+ // using linkname and we don't want to create
+ // duplicate ABI wrappers.
+ if f.lsym.ABI() != obj.ABI0 {
+ needABIAlias, aliasABI = true, obj.ABI0
+ }
+ }
+
+ if needABIAlias {
+ // These LSyms have the same name as the
+ // native function, so we create them directly
+ // rather than looking them up. The uniqueness
+ // of f.lsym ensures uniqueness of asym.
+ asym := &obj.LSym{
+ Name: f.lsym.Name,
+ Type: objabi.SABIALIAS,
+ R: []obj.Reloc{{Sym: f.lsym}}, // 0 size, so "informational"
+ }
+ asym.SetABI(aliasABI)
+ asym.Set(obj.AttrDuplicateOK, true)
+ Ctxt.ABIAliases = append(Ctxt.ABIAliases, asym)
+ }
+ }
+
+ if !hasBody {
+ // For body-less functions, we only create the LSym.
+ return
+ }
+
+ var flag int
+ if f.Dupok() {
+ flag |= obj.DUPOK
+ }
+ if f.Wrapper() {
+ flag |= obj.WRAPPER
+ }
+ if f.Needctxt() {
+ flag |= obj.NEEDCTXT
+ }
+ if f.Pragma&Nosplit != 0 {
+ flag |= obj.NOSPLIT
+ }
+ if f.ReflectMethod() {
+ flag |= obj.REFLECTMETHOD
+ }
+
+ // Clumsy but important.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ if myimportpath == "reflect" {
+ switch f.Nname.Sym.Name {
+ case "callReflect", "callMethod":
+ flag |= obj.WRAPPER
+ }
+ }
+
+ Ctxt.InitTextSym(f.lsym, flag)
+}
+
+func ggloblnod(nam *Node) {
+ s := nam.Sym.Linksym()
+ s.Gotype = ngotype(nam).Linksym()
+ flags := 0
+ if nam.Name.Readonly() {
+ flags = obj.RODATA
+ }
+ if nam.Type != nil && !nam.Type.HasPointers() {
+ flags |= obj.NOPTR
+ }
+ Ctxt.Globl(s, nam.Type.Width, flags)
+ if nam.Name.LibfuzzerExtraCounter() {
+ s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+ }
+ if nam.Sym.Linkname != "" {
+ // Make sure the linkname'd symbol is non-package. When a symbol is
+ // both imported and linkname'd, s.Pkg may not be set to "_" in
+ // types.Sym.Linksym because the LSym already exists. Set it here.
+ s.Pkg = "_"
+ }
+}
+
+func ggloblsym(s *obj.LSym, width int32, flags int16) {
+ if flags&obj.LOCAL != 0 {
+ s.Set(obj.AttrLocal, true)
+ flags &^= obj.LOCAL
+ }
+ Ctxt.Globl(s, int64(width), int(flags))
+}
+
+func Addrconst(a *obj.Addr, v int64) {
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = v
+}
+
+func Patch(p *obj.Prog, to *obj.Prog) {
+ if p.To.Type != obj.TYPE_BRANCH {
+ Fatalf("patch: not a branch")
+ }
+ p.To.SetTarget(to)
+ p.To.Offset = to.Pc
+}
diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go
new file mode 100644
index 0000000..1f53d8c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/iexport.go
@@ -0,0 +1,1515 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
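+// For example, if the Strings section begins with the bytes
+// 5 'h' 'e' 'l' 'l' 'o', then stringOff 0 denotes the string "hello"
+// (an illustrative layout, not actual export data).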
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F'
+// Pos Pos
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T'
+// Pos Pos
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A'
+// Pos Pos
+// Type typeOff
+// }
+//
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
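+// For example, given predeclReserved = 32, typeOff 6 denotes the
+// predeclared type at index 6, while typeOff 40 denotes the type
+// descriptor at offset 8 within the Data section.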
+//
+// Value means a type and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are nine kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+//
+//
+// Compiler-specific details.
+//
+// cmd/compile writes out a second index for inline bodies and also
+// appends additional compiler-specific details after declarations.
+// Third-party tools are not expected to depend on these details and
+// they're expected to change much more rapidly, so they're omitted
+// here. See exportWriter's varExt/funcExt/etc methods for details.
+
+package gc
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/src"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 1: added column details to Pos
+// 0: Go1.11 encoding
+const iexportVersion = 1
+
+// predeclReserved is the number of type offsets reserved for types
+// implicitly declared in the universe block.
+const predeclReserved = 32
+
+// An itag distinguishes the kind of type that was written into the
+// indexed export format.
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+)
+
+func iexport(out *bufio.Writer) {
+ // Mark inline bodies that are reachable through exported types.
+ // (Phase 0 of bexport.go.)
+ {
+ // TODO(mdempsky): Separate from bexport logic.
+ p := &exporter{marked: make(map[*types.Type]bool)}
+ for _, n := range exportlist {
+ sym := n.Sym
+ p.markType(asNode(sym.Def).Type)
+ }
+ }
+
+ p := iexporter{
+ allPkgs: map[*types.Pkg]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[*Node]uint64{},
+ inlineIndex: map[*Node]uint64{},
+ typIndex: map[*types.Type]uint64{},
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, n := range exportlist {
+ p.pushDecl(n)
+ }
+
+ // Loop until no more work. We use a queue because while
+ // writing out inline bodies, we may discover additional
+ // declarations that are needed.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popLeft())
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex, true)
+ w.writeIndex(p.inlineIndex, false)
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ hdr.WriteByte('i')
+ hdr.uint64(iexportVersion)
+ hdr.uint64(uint64(p.strings.Len()))
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ h := md5.New()
+ wr := io.MultiWriter(out, h)
+ io.Copy(wr, &hdr)
+ io.Copy(wr, &p.strings)
+ io.Copy(wr, &p.data0)
+
+ // Add fingerprint (used by linker object file).
+ // Attach it to the end, so tools (e.g. gcimporter) can simply ignore it.
+ copy(Ctxt.Fingerprint[:], h.Sum(nil)[:])
+ out.Write(Ctxt.Fingerprint[:])
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[*Node]uint64, mainIndex bool) {
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Pkg][]*Node{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if mainIndex {
+ pkgObjs[localpkg] = nil
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+ }
+
+ for n := range index {
+ pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n)
+ }
+
+ var pkgs []*types.Pkg
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].Sym.Name < objs[j].Sym.Name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].Path < pkgs[j].Path
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(pkg.Path)
+ if mainIndex {
+ w.string(pkg.Name)
+ w.uint64(uint64(pkg.Height))
+ }
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, n := range objs {
+ w.string(n.Sym.Name)
+ w.uint64(index[n])
+ }
+ }
+}
+
+type iexporter struct {
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can be sure to include them in the
+ // main index.
+ allPkgs map[*types.Pkg]bool
+
+ declTodo nodeQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ data0 intWriter
+ declIndex map[*Node]uint64
+ inlineIndex map[*Node]uint64
+ typIndex map[*types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(n *Node) {
+ if n.Sym == nil || asNode(n.Sym.Def) != n && n.Op != OTYPE {
+ Fatalf("weird Sym: %v, %v", n, n.Sym)
+ }
+
+ // Don't export predeclared declarations.
+ if n.Sym.Pkg == builtinpkg || n.Sym.Pkg == unsafepkg {
+ return
+ }
+
+ if _, ok := p.declIndex[n]; ok {
+ return
+ }
+
+ p.declIndex[n] = ^uint64(0) // mark n present in work queue
+ p.declTodo.pushRight(n)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Pkg
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (p *iexporter) doDecl(n *Node) {
+ w := p.newWriter()
+ w.setPkg(n.Sym.Pkg, false)
+
+ switch n.Op {
+ case ONAME:
+ switch n.Class() {
+ case PEXTERN:
+ // Variable.
+ w.tag('V')
+ w.pos(n.Pos)
+ w.typ(n.Type)
+ w.varExt(n)
+
+ case PFUNC:
+ if n.IsMethod() {
+ Fatalf("unexpected method: %v", n)
+ }
+
+ // Function.
+ w.tag('F')
+ w.pos(n.Pos)
+ w.signature(n.Type)
+ w.funcExt(n)
+
+ default:
+ Fatalf("unexpected class: %v, %v", n, n.Class())
+ }
+
+ case OLITERAL:
+ // Constant.
+ n = typecheck(n, ctxExpr)
+ w.tag('C')
+ w.pos(n.Pos)
+ w.value(n.Type, n.Val())
+
+ case OTYPE:
+ if IsAlias(n.Sym) {
+ // Alias.
+ w.tag('A')
+ w.pos(n.Pos)
+ w.typ(n.Type)
+ break
+ }
+
+ // Defined type.
+ w.tag('T')
+ w.pos(n.Pos)
+
+ underlying := n.Type.Orig
+ if underlying == types.Errortype.Orig {
+ // For "type T error", use error as the
+ // underlying type instead of error's own
+ // underlying anonymous interface. This
+ // ensures consistency with how importers may
+ // declare error (e.g., go/types uses nil Pkg
+ // for predeclared objects).
+ underlying = types.Errortype
+ }
+ w.typ(underlying)
+
+ t := n.Type
+ if t.IsInterface() {
+ w.typeExt(t)
+ break
+ }
+
+ ms := t.Methods()
+ w.uint64(uint64(ms.Len()))
+ for _, m := range ms.Slice() {
+ w.pos(m.Pos)
+ w.selector(m.Sym)
+ w.param(m.Type.Recv())
+ w.signature(m.Type)
+ }
+
+ w.typeExt(t)
+ for _, m := range ms.Slice() {
+ w.methExt(m)
+ }
+
+ default:
+ Fatalf("unexpected node: %v", n)
+ }
+
+ p.declIndex[n] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (p *iexporter) doInline(f *Node) {
+ w := p.newWriter()
+ w.setPkg(fnpkg(f), false)
+
+ w.stmtList(asNodes(f.Func.Inl.Body))
+
+ p.inlineIndex[f] = w.flush()
+}
+
+func (w *exportWriter) pos(pos src.XPos) {
+ p := Ctxt.PosTable.Pos(pos)
+ file := p.Base().AbsFilename()
+ line := int64(p.RelLine())
+ column := int64(p.RelCol())
+
+ // Encode position relative to the last position: column
+ // delta, then line delta, then file name. We reserve the
+ // bottom bit of the column and line deltas to encode whether
+ // the remaining fields are present.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile,
+ // prevLine, and prevColumn as fields of exportWriter.
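+ //
+ // As a worked example of the scheme above: moving from foo.go:10:5
+ // to foo.go:12:7 in the same file gives deltaColumn = (7-5)<<1 = 4
+ // and deltaLine = (12-10)<<1 = 4; since deltaLine is nonzero,
+ // deltaColumn becomes 4|1 = 5, and we emit 5 followed by 4. Because
+ // the bottom bit of deltaLine is clear, no file name is written.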
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
+
+func (w *exportWriter) pkg(pkg *types.Pkg) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(pkg.Path)
+}
+
+func (w *exportWriter) qualifiedIdent(n *Node) {
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(n)
+
+ s := n.Sym
+ w.string(s.Name)
+ w.pkg(s.Pkg)
+}
+
+func (w *exportWriter) selector(s *types.Sym) {
+ if w.currPkg == nil {
+ Fatalf("missing currPkg")
+ }
+
+ // Method selectors are rewritten into method symbols (of the
+ // form T.M) during typechecking, but we want to write out
+ // just the bare method name.
+ name := s.Name
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[i+1:]
+ } else {
+ pkg := w.currPkg
+ if types.IsExported(name) {
+ pkg = localpkg
+ }
+ if s.Pkg != pkg {
+ Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
+ }
+ }
+
+ w.string(name)
+}
+
+func (w *exportWriter) typ(t *types.Type) {
+ w.data.uint64(w.p.typOff(t))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t *types.Type) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t *types.Type) {
+ if t.Sym != nil {
+ if t.Sym.Pkg == builtinpkg || t.Sym.Pkg == unsafepkg {
+ Fatalf("builtin type missing from typIndex: %v", t)
+ }
+
+ w.startType(definedType)
+ w.qualifiedIdent(typenod(t))
+ return
+ }
+
+ switch t.Etype {
+ case TPTR:
+ w.startType(pointerType)
+ w.typ(t.Elem())
+
+ case TSLICE:
+ w.startType(sliceType)
+ w.typ(t.Elem())
+
+ case TARRAY:
+ w.startType(arrayType)
+ w.uint64(uint64(t.NumElem()))
+ w.typ(t.Elem())
+
+ case TCHAN:
+ w.startType(chanType)
+ w.uint64(uint64(t.ChanDir()))
+ w.typ(t.Elem())
+
+ case TMAP:
+ w.startType(mapType)
+ w.typ(t.Key())
+ w.typ(t.Elem())
+
+ case TFUNC:
+ w.startType(signatureType)
+ w.setPkg(t.Pkg(), true)
+ w.signature(t)
+
+ case TSTRUCT:
+ w.startType(structType)
+ w.setPkg(t.Pkg(), true)
+
+ w.uint64(uint64(t.NumFields()))
+ for _, f := range t.FieldSlice() {
+ w.pos(f.Pos)
+ w.selector(f.Sym)
+ w.typ(f.Type)
+ w.bool(f.Embedded != 0)
+ w.string(f.Note)
+ }
+
+ case TINTER:
+ var embeddeds, methods []*types.Field
+ for _, m := range t.Methods().Slice() {
+ if m.Sym != nil {
+ methods = append(methods, m)
+ } else {
+ embeddeds = append(embeddeds, m)
+ }
+ }
+
+ w.startType(interfaceType)
+ w.setPkg(t.Pkg(), true)
+
+ w.uint64(uint64(len(embeddeds)))
+ for _, f := range embeddeds {
+ w.pos(f.Pos)
+ w.typ(f.Type)
+ }
+
+ w.uint64(uint64(len(methods)))
+ for _, f := range methods {
+ w.pos(f.Pos)
+ w.selector(f.Sym)
+ w.signature(f.Type)
+ }
+
+ default:
+ Fatalf("unexpected type: %v", t)
+ }
+}
+
+func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
+ if pkg == nil {
+ // TODO(mdempsky): Proactively set Pkg for types and
+ // remove this fallback logic.
+ pkg = localpkg
+ }
+
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(t *types.Type) {
+ w.paramList(t.Params().FieldSlice())
+ w.paramList(t.Results().FieldSlice())
+ if n := t.Params().NumFields(); n > 0 {
+ w.bool(t.Params().Field(n - 1).IsDDD())
+ }
+}
+
+func (w *exportWriter) paramList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ w.param(f)
+ }
+}
+
+func (w *exportWriter) param(f *types.Field) {
+ w.pos(f.Pos)
+ w.localIdent(origSym(f.Sym), 0)
+ w.typ(f.Type)
+}
+
+func constTypeOf(typ *types.Type) Ctype {
+ switch typ {
+ case types.UntypedInt, types.UntypedRune:
+ return CTINT
+ case types.UntypedFloat:
+ return CTFLT
+ case types.UntypedComplex:
+ return CTCPLX
+ }
+
+ switch typ.Etype {
+ case TCHAN, TFUNC, TMAP, TNIL, TINTER, TPTR, TSLICE, TUNSAFEPTR:
+ return CTNIL
+ case TBOOL:
+ return CTBOOL
+ case TSTRING:
+ return CTSTR
+ case TINT, TINT8, TINT16, TINT32, TINT64,
+ TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
+ return CTINT
+ case TFLOAT32, TFLOAT64:
+ return CTFLT
+ case TCOMPLEX64, TCOMPLEX128:
+ return CTCPLX
+ }
+
+ Fatalf("unexpected constant type: %v", typ)
+ return 0
+}
+
+func (w *exportWriter) value(typ *types.Type, v Val) {
+ if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt {
+ Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt)
+ }
+ w.typ(typ)
+
+ // Each type has only one admissible constant representation,
+ // so we could type switch directly on v.U here. However,
+ // switching on the type increases symmetry with import logic
+ // and provides a useful consistency check.
+
+ switch constTypeOf(typ) {
+ case CTNIL:
+ // Only one value; nothing to encode.
+ _ = v.U.(*NilVal)
+ case CTBOOL:
+ w.bool(v.U.(bool))
+ case CTSTR:
+ w.string(v.U.(string))
+ case CTINT:
+ w.mpint(&v.U.(*Mpint).Val, typ)
+ case CTFLT:
+ w.mpfloat(&v.U.(*Mpflt).Val, typ)
+ case CTCPLX:
+ x := v.U.(*Mpcplx)
+ w.mpfloat(&x.Real.Val, typ)
+ w.mpfloat(&x.Imag.Val, typ)
+ }
+}
+
+func intSize(typ *types.Type) (signed bool, maxBytes uint) {
+ if typ.IsUntyped() {
+ return true, Mpprec / 8
+ }
+
+ switch typ.Etype {
+ case TFLOAT32, TCOMPLEX64:
+ return true, 3
+ case TFLOAT64, TCOMPLEX128:
+ return true, 7
+ }
+
+ signed = typ.IsSigned()
+ maxBytes = uint(typ.Size())
+
+ // The go/types API doesn't expose sizes to importers, so they
+ // don't know how big these types are.
+ switch typ.Etype {
+ case TINT, TUINT, TUINTPTR:
+ maxBytes = 8
+ }
+
+ return
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a
+// 1-, 2-, or 3-byte big-endian string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and
+// the bottom bit of the length prefix byte for large values is
+// reserved as a sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
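+//
+// As a rough worked example for a signed type with maxBytes = 8 (so
+// maxSmall = 240): the value -3 zig-zag encodes to the single byte 5,
+// while a value needing two magnitude bytes, such as 1000, is written
+// as the length-prefix byte 252 (256 - 2*2) followed by the big-endian
+// bytes 0x03 0xe8; -1000 uses prefix 253 (252 | 1) instead.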
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ *types.Type) {
+ signed, maxBytes := intSize(typ)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ Fatalf("leading zeros")
+ }
+ if uint(len(b)) > maxBytes {
+ Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
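+//
+// For example, 6.25 decomposes as 25 × 2**-2 and is therefore written
+// as the mpint 25 followed by the exponent -2.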
+func (w *exportWriter) mpfloat(f *big.Float, typ *types.Type) {
+ if f.IsInf() {
+ Fatalf("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ Fatalf("mantissa scaling failed for %f (%s)", f, acc)
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+// Compiler-specific extensions.
+
+func (w *exportWriter) varExt(n *Node) {
+ w.linkname(n.Sym)
+ w.symIdx(n.Sym)
+}
+
+func (w *exportWriter) funcExt(n *Node) {
+ w.linkname(n.Sym)
+ w.symIdx(n.Sym)
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(n.Type).FieldSlice() {
+ w.string(f.Note)
+ }
+ }
+
+ // Inline body.
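+ // A zero means there is no inline body; otherwise the value written
+ // is Inl.Cost+1, so that a body with cost 0 remains distinguishable
+ // from "no body".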
+ if n.Func.Inl != nil {
+ w.uint64(1 + uint64(n.Func.Inl.Cost))
+ if n.Func.ExportInline() {
+ w.p.doInline(n)
+ }
+
+ // Endlineno for inlined function.
+ if n.Name.Defn != nil {
+ w.pos(n.Name.Defn.Func.Endlineno)
+ } else {
+ // When the exported node was defined externally,
+ // e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
+ // Keep it as we don't distinguish this case in iimport.go.
+ w.pos(n.Func.Endlineno)
+ }
+ } else {
+ w.uint64(0)
+ }
+}
+
+func (w *exportWriter) methExt(m *types.Field) {
+ w.bool(m.Nointerface())
+ w.funcExt(asNode(m.Type.Nname()))
+}
+
+func (w *exportWriter) linkname(s *types.Sym) {
+ w.string(s.Linkname)
+}
+
+func (w *exportWriter) symIdx(s *types.Sym) {
+ lsym := s.Linksym()
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
+ // Don't export index for non-package symbols, linkname'd symbols,
+ // and symbols without an index. They can only be referenced by
+ // name.
+ w.int64(-1)
+ } else {
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.int64(int64(lsym.SymIdx))
+ }
+}
+
+func (w *exportWriter) typeExt(t *types.Type) {
+ // Export whether this type is marked notinheap.
+ w.bool(t.NotInHeap())
+ // For type T, export the index of type descriptor symbols of T and *T.
+ if i, ok := typeSymIdx[t]; ok {
+ w.int64(i[0])
+ w.int64(i[1])
+ return
+ }
+ w.symIdx(typesym(t))
+ w.symIdx(typesym(t.PtrTo()))
+}
+
+// Inline bodies.
+
+func (w *exportWriter) stmtList(list Nodes) {
+ for _, n := range list.Slice() {
+ w.node(n)
+ }
+ w.op(OEND)
+}
+
+func (w *exportWriter) node(n *Node) {
+ if opprec[n.Op] < 0 {
+ w.stmt(n)
+ } else {
+ w.expr(n)
+ }
+}
+
+// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
+// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
+func (w *exportWriter) stmt(n *Node) {
+ if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
+ // can't use stmtList here since we don't want the final OEND
+ for _, n := range n.Ninit.Slice() {
+ w.stmt(n)
+ }
+ }
+
+ switch op := n.Op; op {
+ case ODCL:
+ w.op(ODCL)
+ w.pos(n.Left.Pos)
+ w.localName(n.Left)
+ w.typ(n.Left.Type)
+
+ // case ODCLFIELD:
+ // unimplemented - handled by default case
+
+ case OAS:
+ // Don't export "v = <N>" initializing statements, hope they're always
+ // preceded by the DCL which will be re-parsed and typecheck to reproduce
+ // the "v = <N>" again.
+ if n.Right != nil {
+ w.op(OAS)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.expr(n.Right)
+ }
+
+ case OASOP:
+ w.op(OASOP)
+ w.pos(n.Pos)
+ w.op(n.SubOp())
+ w.expr(n.Left)
+ if w.bool(!n.Implicit()) {
+ w.expr(n.Right)
+ }
+
+ case OAS2:
+ w.op(OAS2)
+ w.pos(n.Pos)
+ w.exprList(n.List)
+ w.exprList(n.Rlist)
+
+ case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ w.op(OAS2)
+ w.pos(n.Pos)
+ w.exprList(n.List)
+ w.exprList(asNodes([]*Node{n.Right}))
+
+ case ORETURN:
+ w.op(ORETURN)
+ w.pos(n.Pos)
+ w.exprList(n.List)
+
+ // case ORETJMP:
+ // unreachable - generated by compiler for trampoline routines
+
+ case OGO, ODEFER:
+ w.op(op)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+
+ case OIF:
+ w.op(OIF)
+ w.pos(n.Pos)
+ w.stmtList(n.Ninit)
+ w.expr(n.Left)
+ w.stmtList(n.Nbody)
+ w.stmtList(n.Rlist)
+
+ case OFOR:
+ w.op(OFOR)
+ w.pos(n.Pos)
+ w.stmtList(n.Ninit)
+ w.exprsOrNil(n.Left, n.Right)
+ w.stmtList(n.Nbody)
+
+ case ORANGE:
+ w.op(ORANGE)
+ w.pos(n.Pos)
+ w.stmtList(n.List)
+ w.expr(n.Right)
+ w.stmtList(n.Nbody)
+
+ case OSELECT, OSWITCH:
+ w.op(op)
+ w.pos(n.Pos)
+ w.stmtList(n.Ninit)
+ w.exprsOrNil(n.Left, nil)
+ w.caseList(n)
+
+ // case OCASE:
+ // handled by caseList
+
+ case OFALL:
+ w.op(OFALL)
+ w.pos(n.Pos)
+
+ case OBREAK, OCONTINUE:
+ w.op(op)
+ w.pos(n.Pos)
+ w.exprsOrNil(n.Left, nil)
+
+ case OEMPTY:
+ // nothing to emit
+
+ case OGOTO, OLABEL:
+ w.op(op)
+ w.pos(n.Pos)
+ w.string(n.Sym.Name)
+
+ default:
+ Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
+ }
+}
+
+func (w *exportWriter) caseList(sw *Node) {
+ namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+
+ cases := sw.List.Slice()
+ w.uint64(uint64(len(cases)))
+ for _, cas := range cases {
+ if cas.Op != OCASE {
+ Fatalf("expected OCASE, got %v", cas)
+ }
+ w.pos(cas.Pos)
+ w.stmtList(cas.List)
+ if namedTypeSwitch {
+ w.localName(cas.Rlist.First())
+ }
+ w.stmtList(cas.Nbody)
+ }
+}
+
+func (w *exportWriter) exprList(list Nodes) {
+ for _, n := range list.Slice() {
+ w.expr(n)
+ }
+ w.op(OEND)
+}
+
+func (w *exportWriter) expr(n *Node) {
+ // from nodefmt (fmt.go)
+ //
+ // nodefmt reverts nodes back to their original - we don't need to do
+ // it because we are not bound to produce valid Go syntax when exporting
+ //
+ // if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
+ // n = n.Orig
+ // }
+
+ // from exprfmt (fmt.go)
+ for n.Op == OPAREN || n.Implicit() && (n.Op == ODEREF || n.Op == OADDR || n.Op == ODOT || n.Op == ODOTPTR) {
+ n = n.Left
+ }
+
+ switch op := n.Op; op {
+ // expressions
+ // (somewhat closely following the structure of exprfmt in fmt.go)
+ case OLITERAL:
+ if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
+ w.expr(n.Orig)
+ break
+ }
+ w.op(OLITERAL)
+ w.pos(n.Pos)
+ w.value(n.Type, n.Val())
+
+ case ONAME:
+ // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
+ // but for export, this should be rendered as (*pkg.T).meth.
+ // These nodes have the special property that they are names with a left OTYPE and a right ONAME.
+ if n.isMethodExpression() {
+ w.op(OXDOT)
+ w.pos(n.Pos)
+ w.expr(n.Left) // n.Left.Op == OTYPE
+ w.selector(n.Right.Sym)
+ break
+ }
+
+ // Package scope name.
+ if (n.Class() == PEXTERN || n.Class() == PFUNC) && !n.isBlank() {
+ w.op(ONONAME)
+ w.qualifiedIdent(n)
+ break
+ }
+
+ // Function scope name.
+ w.op(ONAME)
+ w.localName(n)
+
+ // case OPACK, ONONAME:
+ // should have been resolved by typechecking - handled by default case
+
+ case OTYPE:
+ w.op(OTYPE)
+ w.typ(n.Type)
+
+ case OTYPESW:
+ w.op(OTYPESW)
+ w.pos(n.Pos)
+ var s *types.Sym
+ if n.Left != nil {
+ if n.Left.Op != ONONAME {
+ Fatalf("expected ONONAME, got %v", n.Left)
+ }
+ s = n.Left.Sym
+ }
+ w.localIdent(s, 0) // declared pseudo-variable, if any
+ w.exprsOrNil(n.Right, nil)
+
+ // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+ // should have been resolved by typechecking - handled by default case
+
+ // case OCLOSURE:
+ // unimplemented - handled by default case
+
+ // case OCOMPLIT:
+ // should have been resolved by typechecking - handled by default case
+
+ case OPTRLIT:
+ w.op(OADDR)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+
+ case OSTRUCTLIT:
+ w.op(OSTRUCTLIT)
+ w.pos(n.Pos)
+ w.typ(n.Type)
+ w.elemList(n.List) // special handling of field names
+
+ case OARRAYLIT, OSLICELIT, OMAPLIT:
+ w.op(OCOMPLIT)
+ w.pos(n.Pos)
+ w.typ(n.Type)
+ w.exprList(n.List)
+
+ case OKEY:
+ w.op(OKEY)
+ w.pos(n.Pos)
+ w.exprsOrNil(n.Left, n.Right)
+
+ // case OSTRUCTKEY:
+ // unreachable - handled in case OSTRUCTLIT by elemList
+
+ case OCALLPART:
+ // An OCALLPART is an OXDOT before type checking.
+ w.op(OXDOT)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ // Right node should be ONAME
+ w.selector(n.Right.Sym)
+
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
+ w.op(OXDOT)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.selector(n.Sym)
+
+ case ODOTTYPE, ODOTTYPE2:
+ w.op(ODOTTYPE)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.typ(n.Type)
+
+ case OINDEX, OINDEXMAP:
+ w.op(OINDEX)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.expr(n.Right)
+
+ case OSLICE, OSLICESTR, OSLICEARR:
+ w.op(OSLICE)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ low, high, _ := n.SliceBounds()
+ w.exprsOrNil(low, high)
+
+ case OSLICE3, OSLICE3ARR:
+ w.op(OSLICE3)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ low, high, max := n.SliceBounds()
+ w.exprsOrNil(low, high)
+ w.expr(max)
+
+ case OCOPY, OCOMPLEX:
+ // treated like other builtin calls (see e.g., OREAL)
+ w.op(op)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.expr(n.Right)
+ w.op(OEND)
+
+ case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
+ w.op(OCONV)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.typ(n.Type)
+
+ case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
+ w.op(op)
+ w.pos(n.Pos)
+ if n.Left != nil {
+ w.expr(n.Left)
+ w.op(OEND)
+ } else {
+ w.exprList(n.List) // emits terminating OEND
+ }
+ // only append() calls may contain '...' arguments
+ if op == OAPPEND {
+ w.bool(n.IsDDD())
+ } else if n.IsDDD() {
+ Fatalf("exporter: unexpected '...' with %v call", op)
+ }
+
+ case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+ w.op(OCALL)
+ w.pos(n.Pos)
+ w.stmtList(n.Ninit)
+ w.expr(n.Left)
+ w.exprList(n.List)
+ w.bool(n.IsDDD())
+
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ w.op(op) // must keep separate from OMAKE for importer
+ w.pos(n.Pos)
+ w.typ(n.Type)
+ switch {
+ default:
+ // empty list
+ w.op(OEND)
+ case n.List.Len() != 0: // pre-typecheck
+ w.exprList(n.List) // emits terminating OEND
+ case n.Right != nil:
+ w.expr(n.Left)
+ w.expr(n.Right)
+ w.op(OEND)
+ case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
+ w.expr(n.Left)
+ w.op(OEND)
+ }
+
+ // unary expressions
+ case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
+ w.op(op)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+
+ // binary expressions
+ case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
+ OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
+ w.op(op)
+ w.pos(n.Pos)
+ w.expr(n.Left)
+ w.expr(n.Right)
+
+ case OADDSTR:
+ w.op(OADDSTR)
+ w.pos(n.Pos)
+ w.exprList(n.List)
+
+ case ODCLCONST:
+ // if exporting, DCLCONST should just be removed as its usage
+ // has already been replaced with literals
+
+ default:
+ Fatalf("cannot export %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", n.Op, int(n.Op))
+ }
+}
+
+func (w *exportWriter) op(op Op) {
+ w.uint64(uint64(op))
+}
+
+func (w *exportWriter) exprsOrNil(a, b *Node) {
+ ab := 0
+ if a != nil {
+ ab |= 1
+ }
+ if b != nil {
+ ab |= 2
+ }
+ w.uint64(uint64(ab))
+ if ab&1 != 0 {
+ w.expr(a)
+ }
+ if ab&2 != 0 {
+ w.node(b)
+ }
+}
+
+func (w *exportWriter) elemList(list Nodes) {
+ w.uint64(uint64(list.Len()))
+ for _, n := range list.Slice() {
+ w.selector(n.Sym)
+ w.expr(n.Left)
+ }
+}
+
+func (w *exportWriter) localName(n *Node) {
+ // Escape analysis happens after inline bodies are saved, but
+ // we're using the same ONAME nodes, so we might still see
+ // PAUTOHEAP here.
+ //
+ // Check for Stackcopy to identify PAUTOHEAP that came from
+ // PPARAM/PPARAMOUT, because we only want to include vargen in
+ // non-param names.
+ var v int32
+ if n.Class() == PAUTO || (n.Class() == PAUTOHEAP && n.Name.Param.Stackcopy == nil) {
+ v = n.Name.Vargen
+ }
+
+ w.localIdent(n.Sym, v)
+}
+
+func (w *exportWriter) localIdent(s *types.Sym, v int32) {
+ // Anonymous parameters.
+ if s == nil {
+ w.string("")
+ return
+ }
+
+ name := s.Name
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ // TODO(mdempsky): Fix autotmp hack.
+ if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
+ Fatalf("unexpected dot in identifier: %v", name)
+ }
+
+ if v > 0 {
+ if strings.Contains(name, "·") {
+ Fatalf("exporter: unexpected · in symbol name")
+ }
+ name = fmt.Sprintf("%s·%d", name, v)
+ }
+
+ if !types.IsExported(name) && s.Pkg != w.currPkg {
+ Fatalf("weird package in name: %v => %v, not %q", s, name, w.currPkg.Path)
+ }
+
+ w.string(name)
+}
+
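+// An intWriter is a bytes.Buffer that also knows how to write varint-
+// and uvarint-encoded integers (for instance, the uint64 value 300 is
+// written as the two bytes 0xac 0x02).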
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
diff --git a/src/cmd/compile/internal/gc/iface_test.go b/src/cmd/compile/internal/gc/iface_test.go
new file mode 100644
index 0000000..21c6587
--- /dev/null
+++ b/src/cmd/compile/internal/gc/iface_test.go
@@ -0,0 +1,128 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// Test to make sure we make copies of the values we
+// put in interfaces.
+
+import (
+ "testing"
+)
+
+var x int
+
+func TestEfaceConv1(t *testing.T) {
+ a := 5
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv2(t *testing.T) {
+ a := 5
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv3(t *testing.T) {
+ x = 5
+ if got := e2int3(x); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int3(i interface{}) int {
+ x = 7
+ return i.(int)
+}
+
+func TestEfaceConv4(t *testing.T) {
+ a := 5
+ if got := e2int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int4(i interface{}, p *int) int {
+ *p = 7
+ return i.(int)
+}
+
+type Int int
+
+var y Int
+
+type I interface {
+ foo()
+}
+
+func (i Int) foo() {
+}
+
+func TestIfaceConv1(t *testing.T) {
+ a := Int(5)
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv2(t *testing.T) {
+ a := Int(5)
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv3(t *testing.T) {
+ y = 5
+ if got := i2Int3(y); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int3(i I) Int {
+ y = 7
+ return i.(Int)
+}
+
+func TestIfaceConv4(t *testing.T) {
+ a := Int(5)
+ if got := i2Int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int4(i I, p *Int) Int {
+ *p = 7
+ return i.(Int)
+}
+
+func BenchmarkEfaceInteger(b *testing.B) {
+ sum := 0
+ for i := 0; i < b.N; i++ {
+ sum += i2int(i)
+ }
+ sink = sum
+}
+
+//go:noinline
+func i2int(i interface{}) int {
+ return i.(int)
+}
diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go
new file mode 100644
index 0000000..c0114d0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/iimport.go
@@ -0,0 +1,1117 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "os"
+ "strings"
+)
+
+// An iimporterAndOffset identifies an importer and an offset within
+// its data section.
+type iimporterAndOffset struct {
+ p *iimporter
+ off uint64
+}
+
+var (
+ // declImporter maps from imported identifiers to an importer
+ // and offset where that identifier's declaration can be read.
+ declImporter = map[*types.Sym]iimporterAndOffset{}
+
+ // inlineImporter is like declImporter, but for inline bodies
+ // for function and method symbols.
+ inlineImporter = map[*types.Sym]iimporterAndOffset{}
+)
+
+func expandDecl(n *Node) {
+ if n.Op != ONONAME {
+ return
+ }
+
+ r := importReaderFor(n, declImporter)
+ if r == nil {
+ // Can happen if user tries to reference an undeclared name.
+ return
+ }
+
+ r.doDecl(n)
+}
+
+func expandInline(fn *Node) {
+ if fn.Func.Inl.Body != nil {
+ return
+ }
+
+ r := importReaderFor(fn, inlineImporter)
+ if r == nil {
+ Fatalf("missing import reader for %v", fn)
+ }
+
+ r.doInline(fn)
+}
+
+func importReaderFor(n *Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
+ x, ok := importers[n.Sym]
+ if !ok {
+ return nil
+ }
+
+ return x.p.newReader(x.off, n.Sym.Pkg)
+}
+
+type intReader struct {
+ *bio.Reader
+ pkg *types.Pkg
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ yyerror("import %q: read error: %v", r.pkg.Path, err)
+ errorexit()
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ yyerror("import %q: read error: %v", r.pkg.Path, err)
+ errorexit()
+ }
+ return i
+}
+
+func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
+ ir := &intReader{in, pkg}
+
+ version := ir.uint64()
+ if version != iexportVersion {
+ yyerror("import %q: unknown export format version %d", pkg.Path, version)
+ errorexit()
+ }
+
+ sLen := ir.uint64()
+ dLen := ir.uint64()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
+ if err != nil {
+ yyerror("import %q: mapping input: %v", pkg.Path, err)
+ errorexit()
+ }
+ stringData := data[:sLen]
+ declData := data[sLen:]
+
+ in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
+
+ p := &iimporter{
+ ipkg: pkg,
+
+ pkgCache: map[uint64]*types.Pkg{},
+ posBaseCache: map[uint64]*src.PosBase{},
+ typCache: map[uint64]*types.Type{},
+
+ stringData: stringData,
+ declData: declData,
+ }
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Declaration index.
+ for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ir.uint64())
+ pkgName := p.stringAt(ir.uint64())
+ pkgHeight := int(ir.uint64())
+ if pkg.Name == "" {
+ pkg.Name = pkgName
+ pkg.Height = pkgHeight
+ numImport[pkgName]++
+
+ // TODO(mdempsky): This belongs somewhere else.
+ pkg.Lookup("_").Def = asTypesNode(nblank)
+ } else {
+ if pkg.Name != pkgName {
+ Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
+ }
+ if pkg.Height != pkgHeight {
+ Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
+ }
+ }
+
+ for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ir.uint64()))
+ off := ir.uint64()
+
+ if _, ok := declImporter[s]; ok {
+ continue
+ }
+ declImporter[s] = iimporterAndOffset{p, off}
+
+ // Create stub declaration. If used, this will
+ // be overwritten by expandDecl.
+ if s.Def != nil {
+ Fatalf("unexpected definition for %v: %v", s, asNode(s.Def))
+ }
+ s.Def = asTypesNode(npos(src.NoXPos, dclname(s)))
+ }
+ }
+
+ // Inline body index.
+ for nPkgs := ir.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ir.uint64())
+
+ for nSyms := ir.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.stringAt(ir.uint64()))
+ off := ir.uint64()
+
+ if _, ok := inlineImporter[s]; ok {
+ continue
+ }
+ inlineImporter[s] = iimporterAndOffset{p, off}
+ }
+ }
+
+ // Fingerprint.
+ _, err = io.ReadFull(in, fingerprint[:])
+ if err != nil {
+ yyerror("import %s: error reading fingerprint", pkg.Path)
+ errorexit()
+ }
+ return fingerprint
+}
+
+type iimporter struct {
+ ipkg *types.Pkg
+
+ pkgCache map[uint64]*types.Pkg
+ posBaseCache map[uint64]*src.PosBase
+ typCache map[uint64]*types.Type
+
+ stringData string
+ declData string
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
+
+ slen, n := binary.Uvarint(x[:n])
+ if n <= 0 {
+ Fatalf("varint failed")
+ }
+ spos := off + uint64(n)
+ return p.stringData[spos : spos+slen]
+}
+
+func (p *iimporter) posBaseAt(off uint64) *src.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+
+ file := p.stringAt(off)
+ posBase := src.NewFileBase(file, file)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Pkg {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+
+ pkg := p.ipkg
+ if pkgPath := p.stringAt(off); pkgPath != "" {
+ pkg = types.NewPkg(pkgPath, "")
+ }
+ p.pkgCache[off] = pkg
+ return pkg
+}
+
+// An importReader keeps state for reading an individual imported
+// object (declaration or inline body).
+type importReader struct {
+ strings.Reader
+ p *iimporter
+
+ currPkg *types.Pkg
+ prevBase *src.PosBase
+ prevLine int64
+ prevColumn int64
+}
+
+func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
+ r := &importReader{
+ p: p,
+ currPkg: pkg,
+ }
+ // (*strings.Reader).Reset wasn't added until Go 1.7, and we
+ // need to build with Go 1.4.
+ r.Reader = *strings.NewReader(p.declData[off:])
+ return r
+}
+
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *src.PosBase { return r.p.posBaseAt(r.uint64()) }
+func (r *importReader) pkg() *types.Pkg { return r.p.pkgAt(r.uint64()) }
+
+func (r *importReader) setPkg() {
+ r.currPkg = r.pkg()
+}
+
+func (r *importReader) doDecl(n *Node) {
+ if n.Op != ONONAME {
+ Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op)
+ }
+
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ importalias(r.p.ipkg, pos, n.Sym, typ)
+
+ case 'C':
+ typ, val := r.value()
+
+ importconst(r.p.ipkg, pos, n.Sym, typ, val)
+
+ case 'F':
+ typ := r.signature(nil)
+
+ importfunc(r.p.ipkg, pos, n.Sym, typ)
+ r.funcExt(n)
+
+ case 'T':
+ // Types can be recursive. We need to set up a stub
+ // declaration before recursing.
+ t := importtype(r.p.ipkg, pos, n.Sym)
+
+ // We also need to defer width calculations until
+ // after the underlying type has been assigned.
+ defercheckwidth()
+ underlying := r.typ()
+ setUnderlying(t, underlying)
+ resumecheckwidth()
+
+ if underlying.IsInterface() {
+ r.typeExt(t)
+ break
+ }
+
+ ms := make([]*types.Field, r.uint64())
+ for i := range ms {
+ mpos := r.pos()
+ msym := r.ident()
+ recv := r.param()
+ mtyp := r.signature(recv)
+
+ f := types.NewField()
+ f.Pos = mpos
+ f.Sym = msym
+ f.Type = mtyp
+ ms[i] = f
+
+ m := newfuncnamel(mpos, methodSym(recv.Type, msym))
+ m.Type = mtyp
+ m.SetClass(PFUNC)
+ // methodSym already marked m.Sym as a function.
+
+ // (comment from parser.go)
+ // inl.C's inlnode, when run on a dotmeth node, expects to find the inlineable body as
+ // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+ // out by typecheck's lookdot as this $$.ttype. So by providing
+ // this back link here we avoid special casing there.
+ mtyp.SetNname(asTypesNode(m))
+ }
+ t.Methods().Set(ms)
+
+ r.typeExt(t)
+ for _, m := range ms {
+ r.methExt(m)
+ }
+
+ case 'V':
+ typ := r.typ()
+
+ importvar(r.p.ipkg, pos, n.Sym, typ)
+ r.varExt(n)
+
+ default:
+ Fatalf("unexpected tag: %v", tag)
+ }
+}
+
+func (p *importReader) value() (typ *types.Type, v Val) {
+ typ = p.typ()
+
+ switch constTypeOf(typ) {
+ case CTNIL:
+ v.U = &NilVal{}
+ case CTBOOL:
+ v.U = p.bool()
+ case CTSTR:
+ v.U = p.string()
+ case CTINT:
+ x := new(Mpint)
+ x.Rune = typ == types.UntypedRune
+ p.mpint(&x.Val, typ)
+ v.U = x
+ case CTFLT:
+ x := newMpflt()
+ p.float(x, typ)
+ v.U = x
+ case CTCPLX:
+ x := newMpcmplx()
+ p.float(&x.Real, typ)
+ p.float(&x.Imag, typ)
+ v.U = x
+ }
+ return
+}
+
+func (p *importReader) mpint(x *big.Int, typ *types.Type) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := p.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ Fatalf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ p.Read(b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (p *importReader) float(x *Mpflt, typ *types.Type) {
+ var mant big.Int
+ p.mpint(&mant, typ)
+ m := x.Val.SetInt(&mant)
+ if m.Sign() == 0 {
+ return
+ }
+ m.SetMantExp(m, int(p.int64()))
+}
+
+func (r *importReader) ident() *types.Sym {
+ name := r.string()
+ if name == "" {
+ return nil
+ }
+ pkg := r.currPkg
+ if types.IsExported(name) {
+ pkg = localpkg
+ }
+ return pkg.Lookup(name)
+}
+
+func (r *importReader) qualifiedIdent() *types.Sym {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg.Lookup(name)
+}
+
+func (r *importReader) pos() src.XPos {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevBase = r.posBase()
+ }
+ }
+
+ if (r.prevBase == nil || r.prevBase.AbsFilename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
+ // TODO(mdempsky): Remove once we reliably write
+ // position information for all nodes.
+ return src.NoXPos
+ }
+
+ if r.prevBase == nil {
+ Fatalf("missing posbase")
+ }
+ pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
+ return Ctxt.PosTable.XPos(pos)
+}
+
+func (r *importReader) typ() *types.Type {
+ return r.p.typAt(r.uint64())
+}
+
+func (p *iimporter) typAt(off uint64) *types.Type {
+ t, ok := p.typCache[off]
+ if !ok {
+ if off < predeclReserved {
+ Fatalf("predeclared type missing from cache: %d", off)
+ }
+ t = p.newReader(off-predeclReserved, nil).typ1()
+ p.typCache[off] = t
+ }
+ return t
+}
+
+func (r *importReader) typ1() *types.Type {
+ switch k := r.kind(); k {
+ default:
+ Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
+ return nil
+
+ case definedType:
+ // We might be called from within doInline, in which
+ // case Sym.Def can point to declared parameters
+ // instead of the top-level types. Also, we don't
+ // support inlining functions with local defined
+ // types. Therefore, this must be a package-scope
+ // type.
+ n := asNode(r.qualifiedIdent().PkgDef())
+ if n.Op == ONONAME {
+ expandDecl(n)
+ }
+ if n.Op != OTYPE {
+ Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n)
+ }
+ return n.Type
+ case pointerType:
+ return types.NewPtr(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := types.ChanDir(r.uint64())
+ return types.NewChan(r.typ(), dir)
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+
+ case signatureType:
+ r.setPkg()
+ return r.signature(nil)
+
+ case structType:
+ r.setPkg()
+
+ fs := make([]*types.Field, r.uint64())
+ for i := range fs {
+ pos := r.pos()
+ sym := r.ident()
+ typ := r.typ()
+ emb := r.bool()
+ note := r.string()
+
+ f := types.NewField()
+ f.Pos = pos
+ f.Sym = sym
+ f.Type = typ
+ if emb {
+ f.Embedded = 1
+ }
+ f.Note = note
+ fs[i] = f
+ }
+
+ t := types.New(TSTRUCT)
+ t.SetPkg(r.currPkg)
+ t.SetFields(fs)
+ return t
+
+ case interfaceType:
+ r.setPkg()
+
+ embeddeds := make([]*types.Field, r.uint64())
+ for i := range embeddeds {
+ pos := r.pos()
+ typ := r.typ()
+
+ f := types.NewField()
+ f.Pos = pos
+ f.Type = typ
+ embeddeds[i] = f
+ }
+
+ methods := make([]*types.Field, r.uint64())
+ for i := range methods {
+ pos := r.pos()
+ sym := r.ident()
+ typ := r.signature(fakeRecvField())
+
+ f := types.NewField()
+ f.Pos = pos
+ f.Sym = sym
+ f.Type = typ
+ methods[i] = f
+ }
+
+ t := types.New(TINTER)
+ t.SetPkg(r.currPkg)
+ t.SetInterface(append(embeddeds, methods...))
+
+ // Ensure we expand the interface in the frontend (#25055).
+ checkwidth(t)
+ return t
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Field) *types.Type {
+ params := r.paramList()
+ results := r.paramList()
+ if n := len(params); n > 0 {
+ params[n-1].SetIsDDD(r.bool())
+ }
+ t := functypefield(recv, params, results)
+ t.SetPkg(r.currPkg)
+ return t
+}
+
+func (r *importReader) paramList() []*types.Field {
+ fs := make([]*types.Field, r.uint64())
+ for i := range fs {
+ fs[i] = r.param()
+ }
+ return fs
+}
+
+func (r *importReader) param() *types.Field {
+ f := types.NewField()
+ f.Pos = r.pos()
+ f.Sym = r.ident()
+ f.Type = r.typ()
+ return f
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(r)
+ if err != nil {
+ Fatalf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(r)
+ if err != nil {
+ Fatalf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.ReadByte()
+ if err != nil {
+ Fatalf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+// Compiler-specific extensions.
+
+func (r *importReader) varExt(n *Node) {
+ r.linkname(n.Sym)
+ r.symIdx(n.Sym)
+}
+
+func (r *importReader) funcExt(n *Node) {
+ r.linkname(n.Sym)
+ r.symIdx(n.Sym)
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(n.Type).FieldSlice() {
+ f.Note = r.string()
+ }
+ }
+
+ // Inline body.
+ if u := r.uint64(); u > 0 {
+ n.Func.Inl = &Inline{
+ Cost: int32(u - 1),
+ }
+ n.Func.Endlineno = r.pos()
+ }
+}
+
+func (r *importReader) methExt(m *types.Field) {
+ if r.bool() {
+ m.SetNointerface(true)
+ }
+ r.funcExt(asNode(m.Type.Nname()))
+}
+
+func (r *importReader) linkname(s *types.Sym) {
+ s.Linkname = r.string()
+}
+
+func (r *importReader) symIdx(s *types.Sym) {
+ lsym := s.Linksym()
+ idx := int32(r.int64())
+ if idx != -1 {
+ if s.Linkname != "" {
+ Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
+ }
+ lsym.SymIdx = idx
+ lsym.Set(obj.AttrIndexed, true)
+ }
+}
+
+func (r *importReader) typeExt(t *types.Type) {
+ t.SetNotInHeap(r.bool())
+ i, pi := r.int64(), r.int64()
+ if i != -1 && pi != -1 {
+ typeSymIdx[t] = [2]int64{i, pi}
+ }
+}
+
+// Map imported type T to the index of type descriptor symbols of T and *T,
+// so we can use index to reference the symbol.
+var typeSymIdx = make(map[*types.Type][2]int64)
+
+func (r *importReader) doInline(n *Node) {
+ if len(n.Func.Inl.Body) != 0 {
+ Fatalf("%v already has inline body", n)
+ }
+
+ funchdr(n)
+ body := r.stmtList()
+ funcbody()
+ if body == nil {
+ //
+ // Make sure empty body is not interpreted as
+ // no inlineable body (see also parser.fnbody)
+ // (not doing so can cause significant performance
+ // degradation due to unnecessary calls to empty
+ // functions).
+ body = []*Node{}
+ }
+ n.Func.Inl.Body = body
+
+ importlist = append(importlist, n)
+
+ if Debug.E > 0 && Debug.m > 2 {
+ if Debug.m > 3 {
+ fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ } else {
+ fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, asNodes(n.Func.Inl.Body))
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Inlined function bodies
+
+// Approach: Read nodes and use them to create/declare the same data structures
+// as done originally by the (hidden) parser by closely following the parser's
+// original code. In other words, "parsing" the import data (which happens to
+// be encoded in binary rather textual form) is the best way at the moment to
+// re-establish the syntax tree's invariants. At some future point we might be
+// able to avoid this round-about way and create the rewritten nodes directly,
+// possibly avoiding a lot of duplicate work (name resolution, type checking).
+//
+// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
+// unrefined nodes (since this is what the importer uses). The respective case
+// entries are unreachable in the importer.
+
+func (r *importReader) stmtList() []*Node {
+ var list []*Node
+ for {
+ n := r.node()
+ if n == nil {
+ break
+ }
+ // OBLOCK nodes may be created when importing ODCL nodes - unpack them
+ if n.Op == OBLOCK {
+ list = append(list, n.List.Slice()...)
+ } else {
+ list = append(list, n)
+ }
+
+ }
+ return list
+}
+
+func (r *importReader) caseList(sw *Node) []*Node {
+ namedTypeSwitch := sw.Op == OSWITCH && sw.Left != nil && sw.Left.Op == OTYPESW && sw.Left.Left != nil
+
+ cases := make([]*Node, r.uint64())
+ for i := range cases {
+ cas := nodl(r.pos(), OCASE, nil, nil)
+ cas.List.Set(r.stmtList())
+ if namedTypeSwitch {
+ // Note: per-case variables will have distinct, dotted
+ // names after import. That's okay: swt.go only needs
+ // Sym for diagnostics anyway.
+ caseVar := newnamel(cas.Pos, r.ident())
+ declare(caseVar, dclcontext)
+ cas.Rlist.Set1(caseVar)
+ caseVar.Name.Defn = sw.Left
+ }
+ cas.Nbody.Set(r.stmtList())
+ cases[i] = cas
+ }
+ return cases
+}
+
+func (r *importReader) exprList() []*Node {
+ var list []*Node
+ for {
+ n := r.expr()
+ if n == nil {
+ break
+ }
+ list = append(list, n)
+ }
+ return list
+}
+
+func (r *importReader) expr() *Node {
+ n := r.node()
+ if n != nil && n.Op == OBLOCK {
+ Fatalf("unexpected block node: %v", n)
+ }
+ return n
+}
+
+// TODO(gri) split into expr and stmt
+func (r *importReader) node() *Node {
+ switch op := r.op(); op {
+ // expressions
+ // case OPAREN:
+ // unreachable - unpacked by exporter
+
+ case OLITERAL:
+ pos := r.pos()
+ typ, val := r.value()
+
+ n := npos(pos, nodlit(val))
+ n.Type = typ
+ return n
+
+ case ONONAME:
+ return mkname(r.qualifiedIdent())
+
+ case ONAME:
+ return mkname(r.ident())
+
+ // case OPACK, ONONAME:
+ // unreachable - should have been resolved by typechecking
+
+ case OTYPE:
+ return typenod(r.typ())
+
+ case OTYPESW:
+ n := nodl(r.pos(), OTYPESW, nil, nil)
+ if s := r.ident(); s != nil {
+ n.Left = npos(n.Pos, newnoname(s))
+ }
+ n.Right, _ = r.exprsOrNil()
+ return n
+
+ // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+ // unreachable - should have been resolved by typechecking
+
+ // case OCLOSURE:
+ // unimplemented
+
+ // case OPTRLIT:
+ // unreachable - mapped to case OADDR below by exporter
+
+ case OSTRUCTLIT:
+ // TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
+ savedlineno := lineno
+ lineno = r.pos()
+ n := nodl(lineno, OCOMPLIT, nil, typenod(r.typ()))
+ n.List.Set(r.elemList()) // special handling of field names
+ lineno = savedlineno
+ return n
+
+ // case OARRAYLIT, OSLICELIT, OMAPLIT:
+ // unreachable - mapped to case OCOMPLIT below by exporter
+
+ case OCOMPLIT:
+ n := nodl(r.pos(), OCOMPLIT, nil, typenod(r.typ()))
+ n.List.Set(r.exprList())
+ return n
+
+ case OKEY:
+ pos := r.pos()
+ left, right := r.exprsOrNil()
+ return nodl(pos, OKEY, left, right)
+
+ // case OSTRUCTKEY:
+ // unreachable - handled in case OSTRUCTLIT by elemList
+
+ // case OCALLPART:
+ // unreachable - mapped to case OXDOT below by exporter
+
+ // case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
+ // unreachable - mapped to case OXDOT below by exporter
+
+ case OXDOT:
+ // see parser.new_dotname
+ return npos(r.pos(), nodSym(OXDOT, r.expr(), r.ident()))
+
+ // case ODOTTYPE, ODOTTYPE2:
+ // unreachable - mapped to case ODOTTYPE below by exporter
+
+ case ODOTTYPE:
+ n := nodl(r.pos(), ODOTTYPE, r.expr(), nil)
+ n.Type = r.typ()
+ return n
+
+ // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ // unreachable - mapped to cases below by exporter
+
+ case OINDEX:
+ return nodl(r.pos(), op, r.expr(), r.expr())
+
+ case OSLICE, OSLICE3:
+ n := nodl(r.pos(), op, r.expr(), nil)
+ low, high := r.exprsOrNil()
+ var max *Node
+ if n.Op.IsSlice3() {
+ max = r.expr()
+ }
+ n.SetSliceBounds(low, high, max)
+ return n
+
+ // case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, ORUNES2STR, OSTR2BYTES, OSTR2RUNES, ORUNESTR:
+ // unreachable - mapped to OCONV case below by exporter
+
+ case OCONV:
+ n := nodl(r.pos(), OCONV, r.expr(), nil)
+ n.Type = r.typ()
+ return n
+
+ case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
+ n := npos(r.pos(), builtinCall(op))
+ n.List.Set(r.exprList())
+ if op == OAPPEND {
+ n.SetIsDDD(r.bool())
+ }
+ return n
+
+ // case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+ // unreachable - mapped to OCALL case below by exporter
+
+ case OCALL:
+ n := nodl(r.pos(), OCALL, nil, nil)
+ n.Ninit.Set(r.stmtList())
+ n.Left = r.expr()
+ n.List.Set(r.exprList())
+ n.SetIsDDD(r.bool())
+ return n
+
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ n := npos(r.pos(), builtinCall(OMAKE))
+ n.List.Append(typenod(r.typ()))
+ n.List.Append(r.exprList()...)
+ return n
+
+ // unary expressions
+ case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
+ return nodl(r.pos(), op, r.expr(), nil)
+
+ // binary expressions
+ case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
+ OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
+ return nodl(r.pos(), op, r.expr(), r.expr())
+
+ case OADDSTR:
+ pos := r.pos()
+ list := r.exprList()
+ x := npos(pos, list[0])
+ for _, y := range list[1:] {
+ x = nodl(pos, OADD, x, y)
+ }
+ return x
+
+ // --------------------------------------------------------------------
+ // statements
+ case ODCL:
+ pos := r.pos()
+ lhs := npos(pos, dclname(r.ident()))
+ typ := typenod(r.typ())
+ return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
+
+ // case ODCLFIELD:
+ // unimplemented
+
+ // case OAS, OASWB:
+ // unreachable - mapped to OAS case below by exporter
+
+ case OAS:
+ return nodl(r.pos(), OAS, r.expr(), r.expr())
+
+ case OASOP:
+ n := nodl(r.pos(), OASOP, nil, nil)
+ n.SetSubOp(r.op())
+ n.Left = r.expr()
+ if !r.bool() {
+ n.Right = nodintconst(1)
+ n.SetImplicit(true)
+ } else {
+ n.Right = r.expr()
+ }
+ return n
+
+ // case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ // unreachable - mapped to OAS2 case below by exporter
+
+ case OAS2:
+ n := nodl(r.pos(), OAS2, nil, nil)
+ n.List.Set(r.exprList())
+ n.Rlist.Set(r.exprList())
+ return n
+
+ case ORETURN:
+ n := nodl(r.pos(), ORETURN, nil, nil)
+ n.List.Set(r.exprList())
+ return n
+
+ // case ORETJMP:
+ // unreachable - generated by compiler for trampoline routines (not exported)
+
+ case OGO, ODEFER:
+ return nodl(r.pos(), op, r.expr(), nil)
+
+ case OIF:
+ n := nodl(r.pos(), OIF, nil, nil)
+ n.Ninit.Set(r.stmtList())
+ n.Left = r.expr()
+ n.Nbody.Set(r.stmtList())
+ n.Rlist.Set(r.stmtList())
+ return n
+
+ case OFOR:
+ n := nodl(r.pos(), OFOR, nil, nil)
+ n.Ninit.Set(r.stmtList())
+ n.Left, n.Right = r.exprsOrNil()
+ n.Nbody.Set(r.stmtList())
+ return n
+
+ case ORANGE:
+ n := nodl(r.pos(), ORANGE, nil, nil)
+ n.List.Set(r.stmtList())
+ n.Right = r.expr()
+ n.Nbody.Set(r.stmtList())
+ return n
+
+ case OSELECT, OSWITCH:
+ n := nodl(r.pos(), op, nil, nil)
+ n.Ninit.Set(r.stmtList())
+ n.Left, _ = r.exprsOrNil()
+ n.List.Set(r.caseList(n))
+ return n
+
+ // case OCASE:
+ // handled by caseList
+
+ case OFALL:
+ n := nodl(r.pos(), OFALL, nil, nil)
+ return n
+
+ case OBREAK, OCONTINUE:
+ pos := r.pos()
+ left, _ := r.exprsOrNil()
+ if left != nil {
+ left = newname(left.Sym)
+ }
+ return nodl(pos, op, left, nil)
+
+ // case OEMPTY:
+ // unreachable - not emitted by exporter
+
+ case OGOTO, OLABEL:
+ n := nodl(r.pos(), op, nil, nil)
+ n.Sym = lookup(r.string())
+ return n
+
+ case OEND:
+ return nil
+
+ default:
+ Fatalf("cannot import %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", op, int(op))
+ panic("unreachable") // satisfy compiler
+ }
+}
+
+func (r *importReader) op() Op {
+ return Op(r.uint64())
+}
+
+func (r *importReader) elemList() []*Node {
+ c := r.uint64()
+ list := make([]*Node, c)
+ for i := range list {
+ s := r.ident()
+ list[i] = nodSym(OSTRUCTKEY, r.expr(), s)
+ }
+ return list
+}
+
+func (r *importReader) exprsOrNil() (a, b *Node) {
+ ab := r.uint64()
+ if ab&1 != 0 {
+ a = r.expr()
+ }
+ if ab&2 != 0 {
+ b = r.node()
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go
new file mode 100644
index 0000000..ec9cc4b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/init.go
@@ -0,0 +1,109 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// A function named init is a special case.
+// It is called during package initialization, before main is run.
+// To make it unique within a package and also uncallable,
+// the name, normally "pkg.init", is altered to "pkg.init.0".
+var renameinitgen int
+
+// Dummy function for autotmps generated during typechecking.
+var dummyInitFn = nod(ODCLFUNC, nil, nil)
+
+func renameinit() *types.Sym {
+ s := lookupN("init.", renameinitgen)
+ renameinitgen++
+ return s
+}
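+
+// For illustration: a package containing two user-defined init functions ends
+// up with symbols init.0 and init.1, which fninit below records in the
+// package's initTask in declaration order.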
+
+// fninit makes an initialization record for the package.
+// See runtime/proc.go:initTask for its layout.
+// The 3 tasks for initialization are:
+// 1) Initialize all of the packages the current package depends on.
+// 2) Initialize all the variables that have initializers.
+// 3) Run any init functions.
+func fninit(n []*Node) {
+ nf := initOrder(n)
+
+ var deps []*obj.LSym // initTask records for packages the current package depends on
+ var fns []*obj.LSym // functions to call for package initialization
+
+ // Find imported packages with init tasks.
+ for _, s := range types.InitSyms {
+ deps = append(deps, s.Linksym())
+ }
+
+ // Make a function that contains all the initialization statements.
+ if len(nf) > 0 {
+ lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
+ initializers := lookup("init")
+ fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
+ for _, dcl := range dummyInitFn.Func.Dcl {
+ dcl.Name.Curfn = fn
+ }
+ fn.Func.Dcl = append(fn.Func.Dcl, dummyInitFn.Func.Dcl...)
+ dummyInitFn.Func.Dcl = nil
+
+ fn.Nbody.Set(nf)
+ funcbody()
+
+ fn = typecheck(fn, ctxStmt)
+ Curfn = fn
+ typecheckslice(nf, ctxStmt)
+ Curfn = nil
+ xtop = append(xtop, fn)
+ fns = append(fns, initializers.Linksym())
+ }
+ if dummyInitFn.Func.Dcl != nil {
+ // We only generate temps using dummyInitFn if there
+ // are package-scope initialization statements, so
+ // something's weird if we get here.
+ Fatalf("dummyInitFn still has declarations")
+ }
+ dummyInitFn = nil
+
+ // Record user init functions.
+ for i := 0; i < renameinitgen; i++ {
+ s := lookupN("init.", i)
+ fn := asNode(s.Def).Name.Defn
+ // Skip init functions with empty bodies.
+ if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
+ continue
+ }
+ fns = append(fns, s.Linksym())
+ }
+
+ if len(deps) == 0 && len(fns) == 0 && localpkg.Name != "main" && localpkg.Name != "runtime" {
+ return // nothing to initialize
+ }
+
+ // Make an .inittask structure.
+ sym := lookup(".inittask")
+ nn := newname(sym)
+ nn.Type = types.Types[TUINT8] // dummy type
+ nn.SetClass(PEXTERN)
+ sym.Def = asTypesNode(nn)
+ exportsym(nn)
+ lsym := sym.Linksym()
+ ot := 0
+ ot = duintptr(lsym, ot, 0) // state: not initialized yet
+ ot = duintptr(lsym, ot, uint64(len(deps)))
+ ot = duintptr(lsym, ot, uint64(len(fns)))
+ for _, d := range deps {
+ ot = dsymptr(lsym, ot, d, 0)
+ }
+ for _, f := range fns {
+ ot = dsymptr(lsym, ot, f, 0)
+ }
+ // An initTask has pointers, but none into the Go heap.
+ // It's not quite read only, the state field must be modifiable.
+ ggloblsym(lsym, int32(ot), obj.NOPTR)
+}
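+
+// For illustration (hypothetical package): with two imported packages that
+// carry init tasks and a single user init function, the record emitted above
+// would be laid out as
+//
+//    .inittask:
+//        uintptr 0  // state: not initialized yet
+//        uintptr 2  // number of dependencies
+//        uintptr 1  // number of init functions
+//        ptr     <dep0 .inittask>
+//        ptr     <dep1 .inittask>
+//        ptr     <init.0>
+//
+// matching the initTask layout consumed by runtime/proc.go.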
diff --git a/src/cmd/compile/internal/gc/initorder.go b/src/cmd/compile/internal/gc/initorder.go
new file mode 100644
index 0000000..e2084fd
--- /dev/null
+++ b/src/cmd/compile/internal/gc/initorder.go
@@ -0,0 +1,358 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "container/heap"
+ "fmt"
+)
+
+// Package initialization
+//
+// Here we implement the algorithm for ordering package-level variable
+// initialization. The spec is written in terms of variable
+// initialization, but multiple variables initialized by a single
+// assignment are handled together, so here we instead focus on
+// ordering initialization assignments. Conveniently, this maps well
+// to how we represent package-level initializations using the Node
+// AST.
+//
+// Assignments are in one of three phases: NotStarted, Pending, or
+// Done. For assignments in the Pending phase, we use Xoffset to
+// record the number of unique variable dependencies whose
+// initialization assignment is not yet Done. We also maintain a
+// "blocking" map that maps assignments back to all of the assignments
+// that depend on it.
+//
+// For example, for an initialization like:
+//
+// var x = f(a, b, b)
+// var a, b = g()
+//
+// the "x = f(a, b, b)" assignment depends on two variables (a and b),
+// so its Xoffset will be 2. Correspondingly, the "a, b = g()"
+// assignment's "blocking" entry will have two entries back to x's
+// assignment.
+//
+// Logically, initialization works by (1) taking all NotStarted
+// assignments, calculating their dependencies, and marking them
+// Pending; (2) adding all Pending assignments with Xoffset==0 to a
+// "ready" priority queue (ordered by variable declaration position);
+// and (3) iteratively processing the next Pending assignment from the
+// queue, decreasing the Xoffset of assignments it's blocking, and
+// adding them to the queue if decremented to 0.
+//
+// As an optimization, we actually apply each of these three steps for
+// each assignment. This yields the same order, but keeps queue size
+// down and thus also heap operation costs.
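+//
+// To make the walk above concrete for the example: x's assignment starts
+// Pending with Xoffset == 2 (it depends on the two variables a and b), and
+// blocking maps the "a, b = g()" assignment back to it (twice, once per
+// dependency). "a, b = g()" itself has no dependencies, so it goes straight
+// onto the ready queue and is scheduled first; popping it decrements x's
+// Xoffset to 0, which queues and then schedules x's assignment.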
+
+// Static initialization phase.
+// These values are stored in two bits in Node.flags.
+const (
+ InitNotStarted = iota
+ InitDone
+ InitPending
+)
+
+type InitOrder struct {
+ // blocking maps initialization assignments to the assignments
+ // that depend on it.
+ blocking map[*Node][]*Node
+
+ // ready is the queue of Pending initialization assignments
+ // that are ready for initialization.
+ ready declOrder
+}
+
+// initOrder computes initialization order for a list l of
+// package-level declarations (in declaration order) and outputs the
+// corresponding list of statements to include in the init() function
+// body.
+func initOrder(l []*Node) []*Node {
+ s := InitSchedule{
+ initplans: make(map[*Node]*InitPlan),
+ inittemps: make(map[*Node]*Node),
+ }
+ o := InitOrder{
+ blocking: make(map[*Node][]*Node),
+ }
+
+ // Process all package-level assignment in declaration order.
+ for _, n := range l {
+ switch n.Op {
+ case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ o.processAssign(n)
+ o.flushReady(s.staticInit)
+ case ODCLCONST, ODCLFUNC, ODCLTYPE:
+ // nop
+ default:
+ Fatalf("unexpected package-level statement: %v", n)
+ }
+ }
+
+ // Check that all assignments are now Done; if not, there must
+ // have been a dependency cycle.
+ for _, n := range l {
+ switch n.Op {
+ case OAS, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ if n.Initorder() != InitDone {
+ // If there have already been errors
+ // printed, those errors may have
+ // confused us and there might not be
+ // a loop. Let the user fix those
+ // first.
+ if nerrors > 0 {
+ errorexit()
+ }
+
+ findInitLoopAndExit(firstLHS(n), new([]*Node), make(map[*Node]bool))
+ Fatalf("initialization unfinished, but failed to identify loop")
+ }
+ }
+ }
+
+ // Invariant consistency check. If this is non-zero, then we
+ // should have found a cycle above.
+ if len(o.blocking) != 0 {
+ Fatalf("expected empty map: %v", o.blocking)
+ }
+
+ return s.out
+}
+
+func (o *InitOrder) processAssign(n *Node) {
+ if n.Initorder() != InitNotStarted || n.Xoffset != BADWIDTH {
+ Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ }
+
+ n.SetInitorder(InitPending)
+ n.Xoffset = 0
+
+ // Compute number of variable dependencies and build the
+ // inverse dependency ("blocking") graph.
+ for dep := range collectDeps(n, true) {
+ defn := dep.Name.Defn
+ // Skip dependencies on functions (PFUNC) and
+ // variables already initialized (InitDone).
+ if dep.Class() != PEXTERN || defn.Initorder() == InitDone {
+ continue
+ }
+ n.Xoffset++
+ o.blocking[defn] = append(o.blocking[defn], n)
+ }
+
+ if n.Xoffset == 0 {
+ heap.Push(&o.ready, n)
+ }
+}
+
+// flushReady repeatedly applies initialize to the earliest (in
+// declaration order) assignment ready for initialization and updates
+// the inverse dependency ("blocking") graph.
+func (o *InitOrder) flushReady(initialize func(*Node)) {
+ for o.ready.Len() != 0 {
+ n := heap.Pop(&o.ready).(*Node)
+ if n.Initorder() != InitPending || n.Xoffset != 0 {
+ Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ }
+
+ initialize(n)
+ n.SetInitorder(InitDone)
+ n.Xoffset = BADWIDTH
+
+ blocked := o.blocking[n]
+ delete(o.blocking, n)
+
+ for _, m := range blocked {
+ m.Xoffset--
+ if m.Xoffset == 0 {
+ heap.Push(&o.ready, m)
+ }
+ }
+ }
+}
+
+// findInitLoopAndExit searches for an initialization loop involving variable
+// or function n. If one is found, it reports the loop as an error and exits.
+//
+// path points to a slice used for tracking the sequence of
+// variables/functions visited. Using a pointer to a slice allows the
+// slice capacity to grow and limit reallocations.
+func findInitLoopAndExit(n *Node, path *[]*Node, ok map[*Node]bool) {
+ for i, x := range *path {
+ if x == n {
+ reportInitLoopAndExit((*path)[i:])
+ return
+ }
+ }
+
+ // There might be multiple loops involving n; by sorting
+ // references, we deterministically pick the one reported.
+ refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *Node) bool {
+ return ni.Pos.Before(nj.Pos)
+ })
+
+ *path = append(*path, n)
+ for _, ref := range refers {
+ // Short-circuit variables that were initialized.
+ if ref.Class() == PEXTERN && ref.Name.Defn.Initorder() == InitDone || ok[ref] {
+ continue
+ }
+ findInitLoopAndExit(ref, path, ok)
+ }
+
+ // n is not involved in a cycle.
+ // Record that fact to avoid checking it again when reached another way,
+ // or else this traversal will take exponential time traversing all paths
+ // through the part of the package's call graph implicated in the cycle.
+ ok[n] = true
+
+ *path = (*path)[:len(*path)-1]
+}
+
+// reportInitLoopAndExit reports an initialization loop as an error
+// and exits. However, if l is not actually an initialization loop, it
+// simply returns instead.
+func reportInitLoopAndExit(l []*Node) {
+ // Rotate loop so that the earliest variable declaration is at
+ // the start.
+ i := -1
+ for j, n := range l {
+ if n.Class() == PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+ i = j
+ }
+ }
+ if i == -1 {
+ // False positive: loop only involves recursive
+ // functions. Return so that findInitLoop can continue
+ // searching.
+ return
+ }
+ l = append(l[i:], l[:i]...)
+
+ // TODO(mdempsky): Method values are printed as "T.m-fm"
+ // rather than "T.m". Figure out how to avoid that.
+
+ var msg bytes.Buffer
+ fmt.Fprintf(&msg, "initialization loop:\n")
+ for _, n := range l {
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", n.Line(), n)
+ }
+ fmt.Fprintf(&msg, "\t%v: %v", l[0].Line(), l[0])
+
+ yyerrorl(l[0].Pos, msg.String())
+ errorexit()
+}
+
+// collectDeps returns all of the package-level functions and
+// variables that declaration n depends on. If transitive is true,
+// then it also includes the transitive dependencies of any depended
+// upon functions (but not variables).
+func collectDeps(n *Node, transitive bool) NodeSet {
+ d := initDeps{transitive: transitive}
+ switch n.Op {
+ case OAS:
+ d.inspect(n.Right)
+ case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ d.inspect(n.Right)
+ case ODCLFUNC:
+ d.inspectList(n.Nbody)
+ default:
+ Fatalf("unexpected Op: %v", n.Op)
+ }
+ return d.seen
+}
+
+type initDeps struct {
+ transitive bool
+ seen NodeSet
+}
+
+func (d *initDeps) inspect(n *Node) { inspect(n, d.visit) }
+func (d *initDeps) inspectList(l Nodes) { inspectList(l, d.visit) }
+
+// visit calls foundDep on any package-level functions or variables
+// referenced by n, if any.
+func (d *initDeps) visit(n *Node) bool {
+ switch n.Op {
+ case ONAME:
+ if n.isMethodExpression() {
+ d.foundDep(asNode(n.Type.FuncType().Nname))
+ return false
+ }
+
+ switch n.Class() {
+ case PEXTERN, PFUNC:
+ d.foundDep(n)
+ }
+
+ case OCLOSURE:
+ d.inspectList(n.Func.Closure.Nbody)
+
+ case ODOTMETH, OCALLPART:
+ d.foundDep(asNode(n.Type.FuncType().Nname))
+ }
+
+ return true
+}
+
+// foundDep records that we've found a dependency on n by adding it to
+// seen.
+func (d *initDeps) foundDep(n *Node) {
+ // Can happen with method expressions involving interface
+ // types; e.g., fixedbugs/issue4495.go.
+ if n == nil {
+ return
+ }
+
+ // Names without definitions aren't interesting as far as
+ // initialization ordering goes.
+ if n.Name.Defn == nil {
+ return
+ }
+
+ if d.seen.Has(n) {
+ return
+ }
+ d.seen.Add(n)
+ if d.transitive && n.Class() == PFUNC {
+ d.inspectList(n.Name.Defn.Nbody)
+ }
+}
+
+// declOrder implements heap.Interface, ordering assignment statements
+// by the position of their first LHS expression.
+//
+// N.B., the Pos of the first LHS expression is used because
+// an OAS node's Pos may not be unique. For example, given the
+// declaration "var a, b = f(), g()", "a" must be ordered before "b",
+// but both OAS nodes use the "=" token's position as their Pos.
+type declOrder []*Node
+
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*Node)) }
+func (s *declOrder) Pop() interface{} {
+ n := (*s)[len(*s)-1]
+ *s = (*s)[:len(*s)-1]
+ return n
+}
+
+// firstLHS returns the first expression on the left-hand side of
+// assignment n.
+func firstLHS(n *Node) *Node {
+ switch n.Op {
+ case OAS:
+ return n.Left
+ case OAS2DOTTYPE, OAS2FUNC, OAS2RECV, OAS2MAPR:
+ return n.List.First()
+ }
+
+ Fatalf("unexpected Op: %v", n.Op)
+ return nil
+}
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
new file mode 100644
index 0000000..a8cc010
--- /dev/null
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -0,0 +1,1507 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first caninl determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then inlcalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l a way to disable inlining. Additional levels (beyond -l) may be buggy and
+// are not supported.
+// 0: disabled
+// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default)
+// 2: (unassigned)
+// 3: (unassigned)
+// 4: allow non-leaf functions
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The -d typcheckinl flag enables early typechecking of all imported bodies,
+// which is useful to flush out bugs.
+//
+// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
+// which calls get inlined or not; more -m's are for debugging, and may go away at any point.
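+//
+// For example (a sketch, assuming a package path ./mypkg), the inlining
+// decisions can be inspected with:
+//
+//    go build -gcflags='-m -m' ./mypkg
+//
+// which reports which calls were inlined and, at the second -m level, why
+// rejected candidates could not be inlined.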
+
+package gc
+
+import (
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// Inlining budget parameters, gathered in one place
+const (
+ inlineMaxBudget = 80
+ inlineExtraAppendCost = 0
+ // default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
+ inlineExtraCallCost = 57 // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
+ inlineExtraPanicCost = 1 // do not penalize inlining panics.
+ inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
+
+ inlineBigFunctionNodes = 5000 // Functions with this many nodes are considered "big".
+ inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function.
+)
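+
+// For illustration (using the constants above): every node visited while
+// sizing a function costs 1 against the 80-node budget; a call to a function
+// that could not itself be inlined additionally costs inlineExtraCallCost,
+// while a call to an inlinable function adds that callee's own Inl.Cost. A
+// body whose total exceeds the budget is rejected as "function too complex".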
+
+// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
+// the ->sym can be re-used in the local package, so peel it off the receiver's type.
+func fnpkg(fn *Node) *types.Pkg {
+ if fn.IsMethod() {
+ // method
+ rcvr := fn.Type.Recv().Type
+
+ if rcvr.IsPtr() {
+ rcvr = rcvr.Elem()
+ }
+ if rcvr.Sym == nil {
+ Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr)
+ }
+ return rcvr.Sym.Pkg
+ }
+
+ // non-method
+ return fn.Sym.Pkg
+}
+
+// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
+// because they're a copy of an already checked body.
+func typecheckinl(fn *Node) {
+ lno := setlineno(fn)
+
+ expandInline(fn)
+
+ // typecheckinl is only for imported functions;
+ // their bodies may refer to unsafe as long as the package
+ // was marked safe during import (which was checked then).
+ // the ->inl of a local function has been typechecked before caninl copied it.
+ pkg := fnpkg(fn)
+
+ if pkg == localpkg || pkg == nil {
+ return // typecheckinl on local function
+ }
+
+ if Debug.m > 2 || Debug_export != 0 {
+ fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, asNodes(fn.Func.Inl.Body))
+ }
+
+ savefn := Curfn
+ Curfn = fn
+ typecheckslice(fn.Func.Inl.Body, ctxStmt)
+ Curfn = savefn
+
+ // During expandInline (which imports fn.Func.Inl.Body),
+ // declarations are added to fn.Func.Dcl by funcHdr(). Move them
+ // to fn.Func.Inl.Dcl for consistency with how local functions
+ // behave. (Append because typecheckinl may be called multiple
+ // times.)
+ fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
+ fn.Func.Dcl = nil
+
+ lineno = lno
+}
+
+// Caninl determines whether fn is inlineable.
+// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
+// fn and ->nbody will already have been typechecked.
+func caninl(fn *Node) {
+ if fn.Op != ODCLFUNC {
+ Fatalf("caninl %v", fn)
+ }
+ if fn.Func.Nname == nil {
+ Fatalf("caninl no nname %+v", fn)
+ }
+
+ var reason string // reason, if any, that the function was not inlined
+ if Debug.m > 1 || logopt.Enabled() {
+ defer func() {
+ if reason != "" {
+ if Debug.m > 1 {
+ fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", fn.funcname(), reason)
+ }
+ }
+ }()
+ }
+
+ // If marked "go:noinline", don't inline
+ if fn.Func.Pragma&Noinline != 0 {
+ reason = "marked go:noinline"
+ return
+ }
+
+ // If marked "go:norace" and -race compilation, don't inline.
+ if flag_race && fn.Func.Pragma&Norace != 0 {
+ reason = "marked go:norace with -race compilation"
+ return
+ }
+
+ // If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
+ if Debug_checkptr != 0 && fn.Func.Pragma&NoCheckPtr != 0 {
+ reason = "marked go:nocheckptr"
+ return
+ }
+
+ // If marked "go:cgo_unsafe_args", don't inline, since the
+ // function makes assumptions about its argument frame layout.
+ if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ reason = "marked go:cgo_unsafe_args"
+ return
+ }
+
+ // If marked as "go:uintptrescapes", don't inline, since the
+ // escape information is lost during inlining.
+ if fn.Func.Pragma&UintptrEscapes != 0 {
+ reason = "marked as having an escaping uintptr argument"
+ return
+ }
+
+ // The nowritebarrierrec checker currently works at function
+ // granularity, so inlining yeswritebarrierrec functions can
+ // confuse it (#22342). As a workaround, disallow inlining
+ // them for now.
+ if fn.Func.Pragma&Yeswritebarrierrec != 0 {
+ reason = "marked go:yeswritebarrierrec"
+ return
+ }
+
+ // If fn has no body (is defined outside of Go), cannot inline it.
+ if fn.Nbody.Len() == 0 {
+ reason = "no function body"
+ return
+ }
+
+ if fn.Typecheck() == 0 {
+ Fatalf("caninl on non-typechecked function %v", fn)
+ }
+
+ n := fn.Func.Nname
+ if n.Func.InlinabilityChecked() {
+ return
+ }
+ defer n.Func.SetInlinabilityChecked(true)
+
+ cc := int32(inlineExtraCallCost)
+ if Debug.l == 4 {
+ cc = 1 // this appears to yield better performance than 0.
+ }
+
+ // At this point in the game the function we're looking at may
+ // have "stale" autos, vars that still appear in the Dcl list, but
+ // which no longer have any uses in the function body (due to
+ // elimination by deadcode). We'd like to exclude these dead vars
+ // when creating the "Inline.Dcl" field below; to accomplish this,
+ // the hairyVisitor below builds up a map of used/referenced
+ // locals, and we use this map to produce a pruned Inline.Dcl
+ // list. See issue 25249 for more context.
+
+ visitor := hairyVisitor{
+ budget: inlineMaxBudget,
+ extraCallCost: cc,
+ usedLocals: make(map[*Node]bool),
+ }
+ if visitor.visitList(fn.Nbody) {
+ reason = visitor.reason
+ return
+ }
+ if visitor.budget < 0 {
+ reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-visitor.budget, inlineMaxBudget)
+ return
+ }
+
+ n.Func.Inl = &Inline{
+ Cost: inlineMaxBudget - visitor.budget,
+ Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
+ Body: inlcopylist(fn.Nbody.Slice()),
+ }
+
+ // hack, TODO, check for better way to link method nodes back to the thing with the ->inl
+ // this is so export can find the body of a method
+ fn.Type.FuncType().Nname = asTypesNode(n)
+
+ if Debug.m > 1 {
+ fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", fn.Line(), n, inlineMaxBudget-visitor.budget, fn.Type, asNodes(n.Func.Inl.Body))
+ } else if Debug.m != 0 {
+ fmt.Printf("%v: can inline %v\n", fn.Line(), n)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", fn.funcname(), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+ }
+}
+
+// inlFlood marks n's inline body for export and recursively ensures
+// all called functions are marked too.
+func inlFlood(n *Node) {
+ if n == nil {
+ return
+ }
+ if n.Op != ONAME || n.Class() != PFUNC {
+ Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
+ }
+ if n.Func == nil {
+ Fatalf("inlFlood: missing Func on %v", n)
+ }
+ if n.Func.Inl == nil {
+ return
+ }
+
+ if n.Func.ExportInline() {
+ return
+ }
+ n.Func.SetExportInline(true)
+
+ typecheckinl(n)
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ inspectList(asNodes(n.Func.Inl.Body), func(n *Node) bool {
+ switch n.Op {
+ case ONAME:
+ switch n.Class() {
+ case PFUNC:
+ if n.isMethodExpression() {
+ inlFlood(asNode(n.Type.Nname()))
+ } else {
+ inlFlood(n)
+ exportsym(n)
+ }
+ case PEXTERN:
+ exportsym(n)
+ }
+
+ case ODOTMETH:
+ fn := asNode(n.Type.Nname())
+ inlFlood(fn)
+
+ case OCALLPART:
+ // Okay, because we don't yet inline indirect
+ // calls to method values.
+ case OCLOSURE:
+ // If the closure is inlinable, we'll need to
+ // flood it too. But today we don't support
+ // inlining functions that contain closures.
+ //
+ // When we do, we'll probably want:
+ // inlFlood(n.Func.Closure.Func.Nname)
+ Fatalf("unexpected closure in inlinable function")
+ }
+ return true
+ })
+}
+
+// hairyVisitor visits a function body to determine its inlining
+// hairiness and whether or not it can be inlined.
+type hairyVisitor struct {
+ budget int32
+ reason string
+ extraCallCost int32
+ usedLocals map[*Node]bool
+}
+
+// Look for anything we want to punt on.
+func (v *hairyVisitor) visitList(ll Nodes) bool {
+ for _, n := range ll.Slice() {
+ if v.visit(n) {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *hairyVisitor) visit(n *Node) bool {
+ if n == nil {
+ return false
+ }
+
+ switch n.Op {
+ // Call is okay if inlinable and we have the budget for the body.
+ case OCALLFUNC:
+ // Functions that call runtime.getcaller{pc,sp} can not be inlined
+ // because getcaller{pc,sp} expect a pointer to the caller's first argument.
+ //
+ // runtime.throw is a "cheap call" like panic in normal code.
+ if n.Left.Op == ONAME && n.Left.Class() == PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
+ fn := n.Left.Sym.Name
+ if fn == "getcallerpc" || fn == "getcallersp" {
+ v.reason = "call to " + fn
+ return true
+ }
+ if fn == "throw" {
+ v.budget -= inlineExtraThrowCost
+ break
+ }
+ }
+
+ if isIntrinsicCall(n) {
+ // Treat like any other node.
+ break
+ }
+
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ v.budget -= fn.Func.Inl.Cost
+ break
+ }
+
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ // Call is okay if inlinable and we have the budget for the body.
+ case OCALLMETH:
+ t := n.Left.Type
+ if t == nil {
+ Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ }
+ if t.Nname() == nil {
+ Fatalf("no function definition for [%p] %+v\n", t, t)
+ }
+ if isRuntimePkg(n.Left.Sym.Pkg) {
+ fn := n.Left.Sym.Name
+ if fn == "heapBits.nextArena" {
+ // Special case: explicitly allow
+ // mid-stack inlining of
+ // runtime.heapBits.next even though
+ // it calls slow-path
+ // runtime.heapBits.nextArena.
+ break
+ }
+ }
+ if inlfn := asNode(t.FuncType().Nname).Func; inlfn.Inl != nil {
+ v.budget -= inlfn.Inl.Cost
+ break
+ }
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ // Things that are too hairy, irrespective of the budget
+ case OCALL, OCALLINTER:
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ case OPANIC:
+ v.budget -= inlineExtraPanicCost
+
+ case ORECOVER:
+ // recover matches the argument frame pointer to find
+ // the right panic value, so it needs an argument frame.
+ v.reason = "call to recover"
+ return true
+
+ case OCLOSURE,
+ ORANGE,
+ OSELECT,
+ OGO,
+ ODEFER,
+ ODCLTYPE, // can't print yet
+ ORETJMP:
+ v.reason = "unhandled op " + n.Op.String()
+ return true
+
+ case OAPPEND:
+ v.budget -= inlineExtraAppendCost
+
+ case ODCLCONST, OEMPTY, OFALL:
+ // These nodes don't produce code; omit from inlining budget.
+ return false
+
+ case OLABEL:
+ // TODO(mdempsky): Add support for inlining labeled control statements.
+ if n.labeledControl() != nil {
+ v.reason = "labeled control"
+ return true
+ }
+
+ case OBREAK, OCONTINUE:
+ if n.Sym != nil {
+ // Should have short-circuited due to labeledControl above.
+ Fatalf("unexpected labeled break/continue: %v", n)
+ }
+
+ case OIF:
+ if Isconst(n.Left, CTBOOL) {
+ // This if and the condition cost nothing.
+ return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
+ v.visitList(n.Rlist)
+ }
+
+ case ONAME:
+ if n.Class() == PAUTO {
+ v.usedLocals[n] = true
+ }
+
+ }
+
+ v.budget--
+
+ // When debugging, don't stop early, to get full cost of inlining this function
+ if v.budget < 0 && Debug.m < 2 && !logopt.Enabled() {
+ return true
+ }
+
+ return v.visit(n.Left) || v.visit(n.Right) ||
+ v.visitList(n.List) || v.visitList(n.Rlist) ||
+ v.visitList(n.Ninit) || v.visitList(n.Nbody)
+}
+
+// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
+// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
+// the body and dcls of an inlineable function.
+func inlcopylist(ll []*Node) []*Node {
+ s := make([]*Node, 0, len(ll))
+ for _, n := range ll {
+ s = append(s, inlcopy(n))
+ }
+ return s
+}
+
+func inlcopy(n *Node) *Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ case ONAME, OTYPE, OLITERAL:
+ return n
+ }
+
+ m := n.copy()
+ if n.Op != OCALLPART && m.Func != nil {
+ Fatalf("unexpected Func: %v", m)
+ }
+ m.Left = inlcopy(n.Left)
+ m.Right = inlcopy(n.Right)
+ m.List.Set(inlcopylist(n.List.Slice()))
+ m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
+ m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
+ m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
+
+ return m
+}
+
+func countNodes(n *Node) int {
+ if n == nil {
+ return 0
+ }
+ cnt := 1
+ cnt += countNodes(n.Left)
+ cnt += countNodes(n.Right)
+ for _, n1 := range n.Ninit.Slice() {
+ cnt += countNodes(n1)
+ }
+ for _, n1 := range n.Nbody.Slice() {
+ cnt += countNodes(n1)
+ }
+ for _, n1 := range n.List.Slice() {
+ cnt += countNodes(n1)
+ }
+ for _, n1 := range n.Rlist.Slice() {
+ cnt += countNodes(n1)
+ }
+ return cnt
+}
+
+// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
+func inlcalls(fn *Node) {
+ savefn := Curfn
+ Curfn = fn
+ maxCost := int32(inlineMaxBudget)
+ if countNodes(fn) >= inlineBigFunctionNodes {
+ maxCost = inlineBigFunctionMaxCost
+ }
+ // Map to keep track of functions that have been inlined at a particular
+ // call site, in order to stop inlining when we reach the beginning of a
+ // recursion cycle again. We don't inline immediately recursive functions,
+ // but allow inlining if there is a recursion cycle of many functions.
+ // Most likely, the inlining will stop before we even hit the beginning of
+ // the cycle again, but the map catches the unusual case.
+ inlMap := make(map[*Node]bool)
+ fn = inlnode(fn, maxCost, inlMap)
+ if fn != Curfn {
+ Fatalf("inlnode replaced curfn")
+ }
+ Curfn = savefn
+}
+
+// Turn an OINLCALL into a statement.
+func inlconv2stmt(n *Node) {
+ n.Op = OBLOCK
+
+ // n->ninit stays
+ n.List.Set(n.Nbody.Slice())
+
+ n.Nbody.Set(nil)
+ n.Rlist.Set(nil)
+}
+
+// Turn an OINLCALL into a single valued expression.
+// The result of inlconv2expr MUST be assigned back to n, e.g.
+// n.Left = inlconv2expr(n.Left)
+func inlconv2expr(n *Node) *Node {
+ r := n.Rlist.First()
+ return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
+}
+
+// Turn the rlist (with the return values) of the OINLCALL in
+// n into an expression list, lumping the ninit and body
+// containing the inlined statements onto the first list element so
+// that order is preserved. Used in return, oas2func, and call
+// statements.
+func inlconv2list(n *Node) []*Node {
+ if n.Op != OINLCALL || n.Rlist.Len() == 0 {
+ Fatalf("inlconv2list %+v\n", n)
+ }
+
+ s := n.Rlist.Slice()
+ s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ return s
+}
+
+func inlnodelist(l Nodes, maxCost int32, inlMap map[*Node]bool) {
+ s := l.Slice()
+ for i := range s {
+ s[i] = inlnode(s[i], maxCost, inlMap)
+ }
+}
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// back up will examine left, right, list, rlist, ninit, ntest, nincr,
+// nbody and nelse and use one of the 4 inlconv/glue functions above
+// to turn the OINLCALL into an expression, a statement, or patch it
+// in to this nodes list or rlist as appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here. So this is cleaner and
+// shorter and less complicated.
+// The result of inlnode MUST be assigned back to n, e.g.
+// n.Left = inlnode(n.Left)
+func inlnode(n *Node, maxCost int32, inlMap map[*Node]bool) *Node {
+ if n == nil {
+ return n
+ }
+
+ switch n.Op {
+ case ODEFER, OGO:
+ switch n.Left.Op {
+ case OCALLFUNC, OCALLMETH:
+ n.Left.SetNoInline(true)
+ }
+
+ // TODO do them here (or earlier),
+ // so escape analysis can avoid more heapmoves.
+ case OCLOSURE:
+ return n
+ case OCALLMETH:
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if s := n.Left.Sym; Debug_checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ return n
+ }
+ }
+
+ lno := setlineno(n)
+
+ inlnodelist(n.Ninit, maxCost, inlMap)
+ for _, n1 := range n.Ninit.Slice() {
+ if n1.Op == OINLCALL {
+ inlconv2stmt(n1)
+ }
+ }
+
+ n.Left = inlnode(n.Left, maxCost, inlMap)
+ if n.Left != nil && n.Left.Op == OINLCALL {
+ n.Left = inlconv2expr(n.Left)
+ }
+
+ n.Right = inlnode(n.Right, maxCost, inlMap)
+ if n.Right != nil && n.Right.Op == OINLCALL {
+ if n.Op == OFOR || n.Op == OFORUNTIL {
+ inlconv2stmt(n.Right)
+ } else if n.Op == OAS2FUNC {
+ n.Rlist.Set(inlconv2list(n.Right))
+ n.Right = nil
+ n.Op = OAS2
+ n.SetTypecheck(0)
+ n = typecheck(n, ctxStmt)
+ } else {
+ n.Right = inlconv2expr(n.Right)
+ }
+ }
+
+ inlnodelist(n.List, maxCost, inlMap)
+ if n.Op == OBLOCK {
+ for _, n2 := range n.List.Slice() {
+ if n2.Op == OINLCALL {
+ inlconv2stmt(n2)
+ }
+ }
+ } else {
+ s := n.List.Slice()
+ for i1, n1 := range s {
+ if n1 != nil && n1.Op == OINLCALL {
+ s[i1] = inlconv2expr(s[i1])
+ }
+ }
+ }
+
+ inlnodelist(n.Rlist, maxCost, inlMap)
+ s := n.Rlist.Slice()
+ for i1, n1 := range s {
+ if n1.Op == OINLCALL {
+ if n.Op == OIF {
+ inlconv2stmt(n1)
+ } else {
+ s[i1] = inlconv2expr(s[i1])
+ }
+ }
+ }
+
+ inlnodelist(n.Nbody, maxCost, inlMap)
+ for _, n := range n.Nbody.Slice() {
+ if n.Op == OINLCALL {
+ inlconv2stmt(n)
+ }
+ }
+
+ // with all the branches out of the way, it is now time to
+ // transmogrify this node itself unless inhibited by the
+ // switch at the top of this function.
+ switch n.Op {
+ case OCALLFUNC, OCALLMETH:
+ if n.NoInline() {
+ return n
+ }
+ }
+
+ switch n.Op {
+ case OCALLFUNC:
+ if Debug.m > 3 {
+ fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
+ }
+ if isIntrinsicCall(n) {
+ break
+ }
+ if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ n = mkinlcall(n, fn, maxCost, inlMap)
+ }
+
+ case OCALLMETH:
+ if Debug.m > 3 {
+ fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
+ }
+
+ // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
+ if n.Left.Type == nil {
+ Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ }
+
+ if n.Left.Type.Nname() == nil {
+ Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
+ }
+
+ n = mkinlcall(n, asNode(n.Left.Type.FuncType().Nname), maxCost, inlMap)
+ }
+
+ lineno = lno
+ return n
+}
+
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(fn *Node) *Node {
+ fn = staticValue(fn)
+ switch {
+ case fn.Op == ONAME && fn.Class() == PFUNC:
+ if fn.isMethodExpression() {
+ n := asNode(fn.Type.Nname())
+ // Check that receiver type matches fn.Left.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
+ return nil
+ }
+ return n
+ }
+ return fn
+ case fn.Op == OCLOSURE:
+ c := fn.Func.Closure
+ caninl(c)
+ return c.Func.Nname
+ }
+ return nil
+}
+
+func staticValue(n *Node) *Node {
+ for {
+ if n.Op == OCONVNOP {
+ n = n.Left
+ continue
+ }
+
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(n *Node) *Node {
+ if n.Op != ONAME || n.Class() != PAUTO || n.Name.Addrtaken() {
+ return nil
+ }
+
+ defn := n.Name.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs *Node
+FindRHS:
+ switch defn.Op {
+ case OAS:
+ rhs = defn.Right
+ case OAS2:
+ for i, lhs := range defn.List.Slice() {
+ if lhs == n {
+ rhs = defn.Rlist.Index(i)
+ break FindRHS
+ }
+ }
+ Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ Fatalf("RHS is nil: %v", defn)
+ }
+
+ unsafe, _ := reassigned(n)
+ if unsafe {
+ return nil
+ }
+
+ return rhs
+}
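+
+// For illustration (hypothetical caller): given
+//
+//    f := pkg.Square // f is a local that is never reassigned
+//    x := f(3)
+//
+// staticValue resolves f to pkg.Square, so inlCallee can treat the call as a
+// direct call to pkg.Square and consider it for inlining.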
+
+// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean
+// indicating whether the name has any assignments other than its declaration.
+// The second return value is the first such assignment encountered in the walk, if any. It is mostly
+// useful for -m output documenting the reason for inhibited optimizations.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
+func reassigned(n *Node) (bool, *Node) {
+ if n.Op != ONAME {
+ Fatalf("reassigned %v", n)
+ }
+ // no way to reliably check for no-reassignment of globals, assume it can be
+ if n.Name.Curfn == nil {
+ return true, nil
+ }
+ f := n.Name.Curfn
+ // There just might be a good reason for this although this can be pretty surprising:
+ // local variables inside a closure have Curfn pointing to the OCLOSURE node instead
+ // of the corresponding ODCLFUNC.
+ // We need to walk the function body to check for reassignments so we follow the
+ // linkage to the ODCLFUNC node as that is where body is held.
+ if f.Op == OCLOSURE {
+ f = f.Func.Closure
+ }
+ v := reassignVisitor{name: n}
+ a := v.visitList(f.Nbody)
+ return a != nil, a
+}
+
+type reassignVisitor struct {
+ name *Node
+}
+
+func (v *reassignVisitor) visit(n *Node) *Node {
+ if n == nil {
+ return nil
+ }
+ switch n.Op {
+ case OAS, OSELRECV:
+ if n.Left == v.name && n != v.name.Name.Defn {
+ return n
+ }
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV:
+ for _, p := range n.List.Slice() {
+ if p == v.name && n != v.name.Name.Defn {
+ return n
+ }
+ }
+ case OSELRECV2:
+ if (n.Left == v.name || n.List.First() == v.name) && n != v.name.Name.Defn {
+ return n
+ }
+ }
+ if a := v.visit(n.Left); a != nil {
+ return a
+ }
+ if a := v.visit(n.Right); a != nil {
+ return a
+ }
+ if a := v.visitList(n.List); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Rlist); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Ninit); a != nil {
+ return a
+ }
+ if a := v.visitList(n.Nbody); a != nil {
+ return a
+ }
+ return nil
+}
+
+func (v *reassignVisitor) visitList(l Nodes) *Node {
+ for _, n := range l.Slice() {
+ if a := v.visit(n); a != nil {
+ return a
+ }
+ }
+ return nil
+}
+
+func inlParam(t *types.Field, as *Node, inlvars map[*Node]*Node) *Node {
+ n := asNode(t.Nname)
+ if n == nil || n.isBlank() {
+ return nblank
+ }
+
+ inlvar := inlvars[n]
+ if inlvar == nil {
+ Fatalf("missing inlvar for %v", n)
+ }
+ as.Ninit.Append(nod(ODCL, inlvar, nil))
+ inlvar.Name.Defn = as
+ return inlvar
+}
+
+var inlgen int
+
+// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
+// function with an inlinable body, return an OINLCALL node that can replace n.
+// The returned node's Ninit has the parameter assignments, the Nbody is the
+// inlined function body, and (List, Rlist) contain the (input, output)
+// parameters.
+// The result of mkinlcall MUST be assigned back to n, e.g.
+// n.Left = mkinlcall(n.Left, fn, isddd)
+func mkinlcall(n, fn *Node, maxCost int32, inlMap map[*Node]bool) *Node {
+ if fn.Func.Inl == nil {
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
+ fmt.Sprintf("%s cannot be inlined", fn.pkgFuncName()))
+ }
+ return n
+ }
+ if fn.Func.Inl.Cost > maxCost {
+ // The inlined function body is too big. Typically we use this check to restrict
+ // inlining into very big functions. See issue 26546 and 17566.
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", Curfn.funcname(),
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, fn.pkgFuncName(), maxCost))
+ }
+ return n
+ }
+
+ if fn == Curfn || fn.Name.Defn == Curfn {
+ // Can't recursively inline a function into itself.
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", Curfn.funcname()))
+ }
+ return n
+ }
+
+ if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
+ // Runtime package must not be instrumented.
+ // Instrument skips runtime package. However, some runtime code can be
+ // inlined into other packages and instrumented there. To avoid this,
+ // we disable inlining of runtime functions when instrumenting.
+ // The example that we observed is inlining of LockOSThread,
+ // which led to false race reports on m contents.
+ return n
+ }
+
+ if inlMap[fn] {
+ if Debug.m > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", n.Line(), fn, Curfn.funcname())
+ }
+ return n
+ }
+ inlMap[fn] = true
+ defer func() {
+ inlMap[fn] = false
+ }()
+ if Debug_typecheckinl == 0 {
+ typecheckinl(fn)
+ }
+
+ // We have a function node, and it has an inlineable body.
+ if Debug.m > 1 {
+ fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, asNodes(fn.Func.Inl.Body))
+ } else if Debug.m != 0 {
+ fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
+ }
+ if Debug.m > 2 {
+ fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
+ }
+
+ if ssaDump != "" && ssaDump == Curfn.funcname() {
+ ssaDumpInlined = append(ssaDumpInlined, fn)
+ }
+
+ ninit := n.Ninit
+
+ // For normal function calls, the function callee expression
+ // may contain side effects (e.g., added by addinit during
+ // inlconv2expr or inlconv2list). Make sure to preserve these,
+ // if necessary (#42703).
+ if n.Op == OCALLFUNC {
+ callee := n.Left
+ for callee.Op == OCONVNOP {
+ ninit.AppendNodes(&callee.Ninit)
+ callee = callee.Left
+ }
+ if callee.Op != ONAME && callee.Op != OCLOSURE {
+ Fatalf("unexpected callee expression: %v", callee)
+ }
+ }
+
+ // Make temp names to use instead of the originals.
+ inlvars := make(map[*Node]*Node)
+
+ // record formals/locals for later post-processing
+ var inlfvars []*Node
+
+ // Handle captured variables when inlining closures.
+ if fn.Name.Defn != nil {
+ if c := fn.Name.Defn.Func.Closure; c != nil {
+ for _, v := range c.Func.Closure.Func.Cvars.Slice() {
+ if v.Op == OXXX {
+ continue
+ }
+
+ o := v.Name.Param.Outer
+ // make sure the outer param matches the inlining location
+ // NB: if we enabled inlining of functions containing OCLOSURE or refined
+ // the reassigned check via some sort of copy propagation this would most
+ // likely need to be changed to a loop to walk up to the correct Param
+ if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) {
+ Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v)
+ }
+
+ if v.Name.Byval() {
+ iv := typecheck(inlvar(v), ctxExpr)
+ ninit.Append(nod(ODCL, iv, nil))
+ ninit.Append(typecheck(nod(OAS, iv, o), ctxStmt))
+ inlvars[v] = iv
+ } else {
+ addr := newname(lookup("&" + v.Sym.Name))
+ addr.Type = types.NewPtr(v.Type)
+ ia := typecheck(inlvar(addr), ctxExpr)
+ ninit.Append(nod(ODCL, ia, nil))
+ ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), ctxStmt))
+ inlvars[addr] = ia
+
+ // When capturing by reference, every occurrence of the captured var
+ // must be substituted with a dereference of the temporary address.
+ inlvars[v] = typecheck(nod(ODEREF, ia, nil), ctxExpr)
+ }
+ }
+ }
+ }
+
+ for _, ln := range fn.Func.Inl.Dcl {
+ if ln.Op != ONAME {
+ continue
+ }
+ if ln.Class() == PPARAMOUT { // return values handled below.
+ continue
+ }
+ if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
+ // TODO(mdempsky): Remove once I'm confident
+ // this never actually happens. We currently
+ // perform inlining before escape analysis, so
+ // nothing should have moved to the heap yet.
+ Fatalf("impossible: %v", ln)
+ }
+ inlf := typecheck(inlvar(ln), ctxExpr)
+ inlvars[ln] = inlf
+ if genDwarfInline > 0 {
+ if ln.Class() == PPARAM {
+ inlf.Name.SetInlFormal(true)
+ } else {
+ inlf.Name.SetInlLocal(true)
+ }
+ inlf.Pos = ln.Pos
+ inlfvars = append(inlfvars, inlf)
+ }
+ }
+
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
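+ //
+ // For example (illustrative), "func f() (int, int) { return g(), 1 }" qualifies:
+ // it has exactly one non-empty return and unnamed results. By contrast,
+ // "func f() (x int) { x = g(); return }" does not: its result is named and
+ // its return statement is empty.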
+ delayretvars := true
+
+ nreturns := 0
+ inspectList(asNodes(fn.Func.Inl.Body), func(n *Node) bool {
+ if n != nil && n.Op == ORETURN {
+ nreturns++
+ if n.List.Len() == 0 {
+ delayretvars = false // empty return statement (case 2)
+ }
+ }
+ return true
+ })
+
+ if nreturns != 1 {
+ delayretvars = false // not exactly one return statement (case 1)
+ }
+
+ // temporaries for return values.
+ var retvars []*Node
+ for i, t := range fn.Type.Results().Fields().Slice() {
+ var m *Node
+ if n := asNode(t.Nname); n != nil && !n.isBlank() && !strings.HasPrefix(n.Sym.Name, "~r") {
+ m = inlvar(n)
+ m = typecheck(m, ctxExpr)
+ inlvars[n] = m
+ delayretvars = false // found a named result parameter (case 3)
+ } else {
+ // Anonymous return values: synthesize names for use in the assignment that replaces the return.
+ m = retvar(t, i)
+ }
+
+ if genDwarfInline > 0 {
+ // Don't update the src.Pos on a return variable if it
+ // was manufactured by the inliner (e.g. "~R2"); such vars
+ // were not part of the original callee.
+ if !strings.HasPrefix(m.Sym.Name, "~R") {
+ m.Name.SetInlFormal(true)
+ m.Pos = t.Pos
+ inlfvars = append(inlfvars, m)
+ }
+ }
+
+ retvars = append(retvars, m)
+ }
+
+ // Assign arguments to the parameters' temp names.
+ as := nod(OAS2, nil, nil)
+ as.SetColas(true)
+ if n.Op == OCALLMETH {
+ if n.Left.Left == nil {
+ Fatalf("method call without receiver: %+v", n)
+ }
+ as.Rlist.Append(n.Left.Left)
+ }
+ as.Rlist.Append(n.List.Slice()...)
+
+ // For non-dotted calls to variadic functions, we assign the
+ // variadic parameter's temp name separately.
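+ //
+ // For example (illustrative), for "func f(x int, ys ...int)" the call "f(a, b, c)"
+ // assigns a to x's temp as usual, while b and c are first assigned to ~arg
+ // temporaries and then gathered into a slice composite literal for ys;
+ // a dotted call such as "f(a, s...)" needs no such gathering.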
+ var vas *Node
+
+ if recv := fn.Type.Recv(); recv != nil {
+ as.List.Append(inlParam(recv, as, inlvars))
+ }
+ for _, param := range fn.Type.Params().Fields().Slice() {
+ // For ordinary parameters or variadic parameters in
+ // dotted calls, just add the variable to the
+ // assignment list, and we're done.
+ if !param.IsDDD() || n.IsDDD() {
+ as.List.Append(inlParam(param, as, inlvars))
+ continue
+ }
+
+ // Otherwise, we need to collect the remaining values
+ // to pass as a slice.
+
+ x := as.List.Len()
+ for as.List.Len() < as.Rlist.Len() {
+ as.List.Append(argvar(param.Type, as.List.Len()))
+ }
+ varargs := as.List.Slice()[x:]
+
+ vas = nod(OAS, nil, nil)
+ vas.Left = inlParam(param, vas, inlvars)
+ if len(varargs) == 0 {
+ vas.Right = nodnil()
+ vas.Right.Type = param.Type
+ } else {
+ vas.Right = nod(OCOMPLIT, nil, typenod(param.Type))
+ vas.Right.List.Set(varargs)
+ }
+ }
+
+ if as.Rlist.Len() != 0 {
+ as = typecheck(as, ctxStmt)
+ ninit.Append(as)
+ }
+
+ if vas != nil {
+ vas = typecheck(vas, ctxStmt)
+ ninit.Append(vas)
+ }
+
+ if !delayretvars {
+ // Zero the return parameters.
+ for _, n := range retvars {
+ ninit.Append(nod(ODCL, n, nil))
+ ras := nod(OAS, n, nil)
+ ras = typecheck(ras, ctxStmt)
+ ninit.Append(ras)
+ }
+ }
+
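+ // retlabel is the target that substituted "return" statements in the
+ // inlined body will jump to; the label itself is appended after the body below.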
+ retlabel := autolabel(".i")
+
+ inlgen++
+
+ parent := -1
+ if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
+ parent = b.InliningIndex()
+ }
+ newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ inlMark := nod(OINLMARK, nil, nil)
+ inlMark.Pos = n.Pos.WithIsStmt()
+ inlMark.Xoffset = int64(newIndex)
+ ninit.Append(inlMark)
+
+ if genDwarfInline > 0 {
+ if !fn.Sym.Linksym().WasInlined() {
+ Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
+ fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
+ }
+ }
+
+ subst := inlsubst{
+ retlabel: retlabel,
+ retvars: retvars,
+ delayretvars: delayretvars,
+ inlvars: inlvars,
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: newIndex,
+ }
+
+ body := subst.list(asNodes(fn.Func.Inl.Body))
+
+ lab := nodSym(OLABEL, nil, retlabel)
+ body = append(body, lab)
+
+ typecheckslice(body, ctxStmt)
+
+ if genDwarfInline > 0 {
+ for _, v := range inlfvars {
+ v.Pos = subst.updatedPos(v.Pos)
+ }
+ }
+
+ //dumplist("ninit post", ninit);
+
+ call := nod(OINLCALL, nil, nil)
+ call.Ninit.Set(ninit.Slice())
+ call.Nbody.Set(body)
+ call.Rlist.Set(retvars)
+ call.Type = n.Type
+ call.SetTypecheck(1)
+
+ // Transitive inlining.
+ // It might be nice to do this before exporting the body,
+ // but we can't emit the body with inlining already expanded.
+ // Instead we emit the things that the body needs,
+ // and each use must redo the inlining.
+ // Luckily these are small.
+ inlnodelist(call.Nbody, maxCost, inlMap)
+ for _, n := range call.Nbody.Slice() {
+ if n.Op == OINLCALL {
+ inlconv2stmt(n)
+ }
+ }
+
+ if Debug.m > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call)
+ }
+
+ return call
+}
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTOs in the calling function, and link them off of the
+// PPARAMs, PAUTOs, and PPARAMOUTs of the called function.
+func inlvar(var_ *Node) *Node {
+ if Debug.m > 3 {
+ fmt.Printf("inlvar %+v\n", var_)
+ }
+
+ n := newname(var_.Sym)
+ n.Type = var_.Type
+ n.SetClass(PAUTO)
+ n.Name.SetUsed(true)
+ n.Name.Curfn = Curfn // the calling function, not the called one
+ n.Name.SetAddrtaken(var_.Name.Addrtaken())
+
+ Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+func retvar(t *types.Field, i int) *Node {
+ n := newname(lookupN("~R", i))
+ n.Type = t.Type
+ n.SetClass(PAUTO)
+ n.Name.SetUsed(true)
+ n.Name.Curfn = Curfn // the calling function, not the called one
+ Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ return n
+}
+
+// Synthesize a variable to store the inlined function's arguments
+// when they come from a multiple return call.
+func argvar(t *types.Type, i int) *Node {
+ n := newname(lookupN("~arg", i))
+ n.Type = t.Elem()
+ n.SetClass(PAUTO)
+ n.Name.SetUsed(true)
+ n.Name.Curfn = Curfn // the calling function, not the called one
+ Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ return n
+}
+
+// The inlsubst type implements the actual inlining of a single
+// function call.
+type inlsubst struct {
+ // Target of the goto substituted in place of a return.
+ retlabel *types.Sym
+
+ // Temporary result variables.
+ retvars []*Node
+
+ // Whether result variables should be initialized at the
+ // "return" statement.
+ delayretvars bool
+
+ inlvars map[*Node]*Node
+
+ // bases maps from original PosBase to PosBase with an extra
+ // inlined call frame.
+ bases map[*src.PosBase]*src.PosBase
+
+ // newInlIndex is the index of the inlined call frame to
+ // insert for inlined nodes.
+ newInlIndex int
+}
+
+// list inlines a list of nodes.
+func (subst *inlsubst) list(ll Nodes) []*Node {
+ s := make([]*Node, 0, ll.Len())
+ for _, n := range ll.Slice() {
+ s = append(s, subst.node(n))
+ }
+ return s
+}
+
+// node recursively copies a node from the saved pristine body of the
+// inlined function, substituting references to input/output
+// parameters with ones to the tmpnames, and substituting returns with
+// assignments to the output.
+func (subst *inlsubst) node(n *Node) *Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ case ONAME:
+ if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
+ if Debug.m > 2 {
+ fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
+ }
+ return inlvar
+ }
+
+ if Debug.m > 2 {
+ fmt.Printf("not substituting name %+v\n", n)
+ }
+ return n
+
+ case OLITERAL, OTYPE:
+ // If n is a named constant or type, we can continue
+ // using it in the inline copy. Otherwise, make a copy
+ // so we can update the line number.
+ if n.Sym != nil {
+ return n
+ }
+
+ case ORETURN:
+ // Since we don't handle bodies with closures, this return is
+ // guaranteed to belong to the current inlined function.
+
+ // dump("Return before substitution", n);
+ m := nodSym(OGOTO, nil, subst.retlabel)
+ m.Ninit.Set(subst.list(n.Ninit))
+
+ if len(subst.retvars) != 0 && n.List.Len() != 0 {
+ as := nod(OAS2, nil, nil)
+
+ // Make a shallow copy of retvars.
+ // Otherwise OINLCALL.Rlist will be the same list,
+ // and later walk and typecheck may clobber it.
+ for _, n := range subst.retvars {
+ as.List.Append(n)
+ }
+ as.Rlist.Set(subst.list(n.List))
+
+ if subst.delayretvars {
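+ // Delayed declaration of the result variables: declare them here,
+ // at their single initializing return (see delayretvars in mkinlcall).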
+ for _, n := range as.List.Slice() {
+ as.Ninit.Append(nod(ODCL, n, nil))
+ n.Name.Defn = as
+ }
+ }
+
+ as = typecheck(as, ctxStmt)
+ m.Ninit.Append(as)
+ }
+
+ typecheckslice(m.Ninit.Slice(), ctxStmt)
+ m = typecheck(m, ctxStmt)
+
+ // dump("Return after substitution", m);
+ return m
+
+ case OGOTO, OLABEL:
+ m := n.copy()
+ m.Pos = subst.updatedPos(m.Pos)
+ m.Ninit.Set(nil)
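+ // Rename the label with the current inlining generation so that copies
+ // of the same body inlined more than once do not collide with each
+ // other or with labels in the surrounding function.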
+ p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen)
+ m.Sym = lookup(p)
+
+ return m
+ }
+
+ m := n.copy()
+ m.Pos = subst.updatedPos(m.Pos)
+ m.Ninit.Set(nil)
+
+ if n.Op == OCLOSURE {
+ Fatalf("cannot inline function containing closure: %+v", n)
+ }
+
+ m.Left = subst.node(n.Left)
+ m.Right = subst.node(n.Right)
+ m.List.Set(subst.list(n.List))
+ m.Rlist.Set(subst.list(n.Rlist))
+ m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
+ m.Nbody.Set(subst.list(n.Nbody))
+
+ return m
+}
+
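+// updatedPos rewrites xpos so that its position base records the inlined call
+// frame index for this inlining, creating and caching a new base the first
+// time each original base is seen.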
+func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
+ pos := Ctxt.PosTable.Pos(xpos)
+ oldbase := pos.Base() // can be nil
+ newbase := subst.bases[oldbase]
+ if newbase == nil {
+ newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
+ subst.bases[oldbase] = newbase
+ }
+ pos.SetBase(newbase)
+ return Ctxt.PosTable.XPos(pos)
+}
+
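+// pruneUnusedAutos returns a copy of ll with any PAUTO variables that the
+// visitor did not record as used removed; all other declarations are kept.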
+func pruneUnusedAutos(ll []*Node, vis *hairyVisitor) []*Node {
+ s := make([]*Node, 0, len(ll))
+ for _, n := range ll {
+ if n.Class() == PAUTO {
+ if _, found := vis.usedLocals[n]; !found {
+ continue
+ }
+ }
+ s = append(s, n)
+ }
+ return s
+}
+
+// devirtualize replaces interface method calls within fn with direct
+// concrete-type method calls where applicable.
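+//
+// For example (illustrative), given
+//
+// var w io.Writer = new(bytes.Buffer)
+// w.Write(p)
+//
+// the interface call can be rewritten as a direct call on the concrete type,
+// effectively w.(*bytes.Buffer).Write(p), because w's static value is known
+// to be a *bytes.Buffer.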
+func devirtualize(fn *Node) {
+ Curfn = fn
+ inspectList(fn.Nbody, func(n *Node) bool {
+ if n.Op == OCALLINTER {
+ devirtualizeCall(n)
+ }
+ return true
+ })
+}
+
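+// devirtualizeCall attempts to rewrite an OCALLINTER node as a direct method
+// call on the concrete type of its receiver's static value; when this
+// succeeds it updates the call's Op, Left, and result type.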
+func devirtualizeCall(call *Node) {
+ recv := staticValue(call.Left.Left)
+ if recv.Op != OCONVIFACE {
+ return
+ }
+
+ typ := recv.Left.Type
+ if typ.IsInterface() {
+ return
+ }
+
+ x := nodl(call.Left.Pos, ODOTTYPE, call.Left.Left, nil)
+ x.Type = typ
+ x = nodlSym(call.Left.Pos, OXDOT, x, call.Left.Sym)
+ x = typecheck(x, ctxExpr|ctxCallee)
+ switch x.Op {
+ case ODOTMETH:
+ if Debug.m != 0 {
+ Warnl(call.Pos, "devirtualizing %v to %v", call.Left, typ)
+ }
+ call.Op = OCALLMETH
+ call.Left = x
+ case ODOTINTER:
+ // Promoted method from embedded interface-typed field (#42279).
+ if Debug.m != 0 {
+ Warnl(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
+ }
+ call.Op = OCALLINTER
+ call.Left = x
+ default:
+ // TODO(mdempsky): Turn back into Fatalf after more testing.
+ if Debug.m != 0 {
+ Warnl(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
+ }
+ return
+ }
+
+ // Duplicated logic from typecheck for function call return
+ // value types.
+ //
+ // Receiver parameter size may have changed; need to update
+ // call.Type to get correct stack offsets for result
+ // parameters.
+ checkwidth(x.Type)
+ switch ft := x.Type; ft.NumResults() {
+ case 0:
+ case 1:
+ call.Type = ft.Results().Field(0).Type
+ default:
+ call.Type = ft.Results()
+ }
+}
diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go
new file mode 100644
index 0000000..02735e5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/inl_test.go
@@ -0,0 +1,269 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bufio"
+ "internal/testenv"
+ "io"
+ "math/bits"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TestIntendedInlining tests that specific runtime functions are inlined.
+// This allows refactoring for code clarity and re-use without fear that
+// changes to the compiler will cause silent performance regressions.
+func TestIntendedInlining(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ // want is the list of function names (by package) that should
+ // be inlinable. If they have no callers in their packages, they
+ // might not actually be inlined anywhere.
+ want := map[string][]string{
+ "runtime": {
+ "add",
+ "acquirem",
+ "add1",
+ "addb",
+ "adjustpanics",
+ "adjustpointer",
+ "alignDown",
+ "alignUp",
+ "bucketMask",
+ "bucketShift",
+ "chanbuf",
+ "deferArgs",
+ "deferclass",
+ "evacuated",
+ "fastlog2",
+ "fastrand",
+ "float64bits",
+ "funcPC",
+ "getArgInfoFast",
+ "getm",
+ "getMCache",
+ "isDirectIface",
+ "itabHashFunc",
+ "noescape",
+ "pcvalueCacheKey",
+ "readUnaligned32",
+ "readUnaligned64",
+ "releasem",
+ "roundupsize",
+ "stackmapdata",
+ "stringStructOf",
+ "subtract1",
+ "subtractb",
+ "tophash",
+ "totaldefersize",
+ "(*bmap).keys",
+ "(*bmap).overflow",
+ "(*waitq).enqueue",
+
+ // GC-related ones
+ "cgoInRange",
+ "gclinkptr.ptr",
+ "guintptr.ptr",
+ "heapBits.bits",
+ "heapBits.isPointer",
+ "heapBits.morePointers",
+ "heapBits.next",
+ "heapBitsForAddr",
+ "markBits.isMarked",
+ "muintptr.ptr",
+ "puintptr.ptr",
+ "spanOf",
+ "spanOfUnchecked",
+ "(*gcWork).putFast",
+ "(*gcWork).tryGetFast",
+ "(*guintptr).set",
+ "(*markBits).advance",
+ "(*mspan).allocBitsForIndex",
+ "(*mspan).base",
+ "(*mspan).markBitsForBase",
+ "(*mspan).markBitsForIndex",
+ "(*muintptr).set",
+ "(*puintptr).set",
+ },
+ "runtime/internal/sys": {},
+ "runtime/internal/math": {
+ "MulUintptr",
+ },
+ "bytes": {
+ "(*Buffer).Bytes",
+ "(*Buffer).Cap",
+ "(*Buffer).Len",
+ "(*Buffer).Grow",
+ "(*Buffer).Next",
+ "(*Buffer).Read",
+ "(*Buffer).ReadByte",
+ "(*Buffer).Reset",
+ "(*Buffer).String",
+ "(*Buffer).UnreadByte",
+ "(*Buffer).tryGrowByReslice",
+ },
+ "compress/flate": {
+ "byLiteral.Len",
+ "byLiteral.Less",
+ "byLiteral.Swap",
+ "(*dictDecoder).tryWriteCopy",
+ },
+ "encoding/base64": {
+ "assemble32",
+ "assemble64",
+ },
+ "unicode/utf8": {
+ "FullRune",
+ "FullRuneInString",
+ "RuneLen",
+ "ValidRune",
+ },
+ "reflect": {
+ "Value.CanAddr",
+ "Value.CanSet",
+ "Value.CanInterface",
+ "Value.IsValid",
+ "Value.pointer",
+ "add",
+ "align",
+ "flag.mustBe",
+ "flag.mustBeAssignable",
+ "flag.mustBeExported",
+ "flag.kind",
+ "flag.ro",
+ },
+ "regexp": {
+ "(*bitState).push",
+ },
+ "math/big": {
+ "bigEndianWord",
+ // The following functions require the math_big_pure_go build tag.
+ "addVW",
+ "subVW",
+ },
+ "math/rand": {
+ "(*rngSource).Int63",
+ "(*rngSource).Uint64",
+ },
+ }
+
+ if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
+ // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable.
+ // We currently don't have midstack inlining, so nextFreeFast is also not inlinable on 386.
+ // On mips64x and riscv64, Ctz64 is not intrinsified and makes nextFreeFast too expensive
+ // to inline (issue 22239).
+ want["runtime"] = append(want["runtime"], "nextFreeFast")
+ }
+ if runtime.GOARCH != "386" {
+ // As explained above, Ctz64 and Ctz32 are not Go code on 386.
+ // The same applies to Bswap32.
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz64")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz32")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
+ }
+ if bits.UintSize == 64 {
+ // rotl_31 is only defined on 64-bit architectures
+ want["runtime"] = append(want["runtime"], "rotl_31")
+ }
+
+ switch runtime.GOARCH {
+ case "386", "wasm", "arm":
+ default:
+ // TODO(mvdan): As explained in /test/inline_sync.go, some
+ // architectures don't have atomic intrinsics, so these go over
+ // the inlining budget. Move back to the main table once that
+ // problem is solved.
+ want["sync"] = []string{
+ "(*Mutex).Lock",
+ "(*Mutex).Unlock",
+ "(*RWMutex).RLock",
+ "(*RWMutex).RUnlock",
+ "(*Once).Do",
+ }
+ }
+
+ // Functions that must actually be inlined; they must have actual callers.
+ must := map[string]bool{
+ "compress/flate.byLiteral.Len": true,
+ "compress/flate.byLiteral.Less": true,
+ "compress/flate.byLiteral.Swap": true,
+ }
+
+ notInlinedReason := make(map[string]string)
+ pkgs := make([]string, 0, len(want))
+ for pname, fnames := range want {
+ pkgs = append(pkgs, pname)
+ for _, fname := range fnames {
+ fullName := pname + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ t.Errorf("duplicate func: %s", fullName)
+ }
+ notInlinedReason[fullName] = "unknown reason"
+ }
+ }
+
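+ // Build every listed package with -m -m diagnostics and scan the compiler
+ // output as it streams through a pipe, so the test does not need to buffer
+ // the entire (large) build output.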
+ args := append([]string{"build", "-a", "-gcflags=all=-m -m", "-tags=math_big_pure_go"}, pkgs...)
+ cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), args...))
+ pr, pw := io.Pipe()
+ cmd.Stdout = pw
+ cmd.Stderr = pw
+ cmdErr := make(chan error, 1)
+ go func() {
+ cmdErr <- cmd.Run()
+ pw.Close()
+ }()
+ scanner := bufio.NewScanner(pr)
+ curPkg := ""
+ canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
+ haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
+ cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "# ") {
+ curPkg = line[2:]
+ continue
+ }
+ if m := haveInlined.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ delete(notInlinedReason, curPkg+"."+fname)
+ continue
+ }
+ if m := canInline.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ fullname := curPkg + "." + fname
+ // If the function must be inlined somewhere, being inlinable is not enough.
+ if _, ok := must[fullname]; !ok {
+ delete(notInlinedReason, fullname)
+ continue
+ }
+ }
+ if m := cannotInline.FindStringSubmatch(line); m != nil {
+ fname, reason := m[1], m[2]
+ fullName := curPkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ // cmd/compile gave us a reason why
+ notInlinedReason[fullName] = reason
+ }
+ continue
+ }
+ }
+ if err := <-cmdErr; err != nil {
+ t.Fatal(err)
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatal(err)
+ }
+ for fullName, reason := range notInlinedReason {
+ t.Errorf("%s was not inlined: %s", fullName, reason)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/lang_test.go b/src/cmd/compile/internal/gc/lang_test.go
new file mode 100644
index 0000000..72e7f07
--- /dev/null
+++ b/src/cmd/compile/internal/gc/lang_test.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+const aliasSrc = `
+package x
+
+type T = int
+`
+
+func TestInvalidLang(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := ioutil.TempDir("", "TestInvalidLang")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "alias.go")
+ if err := ioutil.WriteFile(src, []byte(aliasSrc), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "alias.o")
+
+ if testLang(t, "go9.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go9.99 succeeded unexpectedly")
+ }
+
+ // This test will have to be adjusted if we ever reach 1.99 or 2.0.
+ if testLang(t, "go1.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.99 succeeded unexpectedly")
+ }
+
+ if testLang(t, "go1.8", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.8 succeeded unexpectedly")
+ }
+
+ if err := testLang(t, "go1.9", src, outfile); err != nil {
+ t.Errorf("compilation with -lang=go1.9 failed unexpectedly: %v", err)
+ }
+}
+
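+// testLang runs "go tool compile -lang=<lang>" on src, writing the object to
+// outfile, and returns the error from the compiler invocation (nil on success).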
+func testLang(t *testing.T, lang, src, outfile string) error {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-lang", lang, "-o", outfile, src}
+ t.Log(run)
+ out, err := exec.Command(run[0], run[1:]...).CombinedOutput()
+ t.Logf("%s", out)
+ return err
+}
diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go
new file mode 100644
index 0000000..7cce371
--- /dev/null
+++ b/src/cmd/compile/internal/gc/lex.go
@@ -0,0 +1,224 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// lineno is the source position at the start of the most recently lexed token.
+// TODO(gri) rename and eventually remove
+var lineno src.XPos
+
+func makePos(base *src.PosBase, line, col uint) src.XPos {
+ return Ctxt.PosTable.XPos(src.MakePos(base, line, col))
+}
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func isQuoted(s string) bool {
+ return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
+}
+
+type PragmaFlag int16
+
+const (
+ // Func pragmas.
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrEscapes // pointers converted to uintptr escape
+
+ // Runtime-only func pragmas.
+ // See ../../../../runtime/README.md for detailed descriptions.
+ Systemstack // func must run on system stack
+ Nowritebarrier // emit compiler error instead of write barrier
+ Nowritebarrierrec // error on write barrier in this or recursive callees
+ Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+ // Runtime and cgo type pragmas
+ NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas
+ GoBuildPragma
+)
+
+const (
+ FuncPragmas = Nointerface |
+ Noescape |
+ Norace |
+ Nosplit |
+ Noinline |
+ NoCheckPtr |
+ CgoUnsafeArgs |
+ UintptrEscapes |
+ Systemstack |
+ Nowritebarrier |
+ Nowritebarrierrec |
+ Yeswritebarrierrec
+
+ TypePragmas = NotInHeap
+)
+
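+// pragmaFlag returns the PragmaFlag bits for the given //go: directive verb,
+// or 0 if the verb is not recognized.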
+func pragmaFlag(verb string) PragmaFlag {
+ switch verb {
+ case "go:build":
+ return GoBuildPragma
+ case "go:nointerface":
+ if objabi.Fieldtrack_enabled != 0 {
+ return Nointerface
+ }
+ case "go:noescape":
+ return Noescape
+ case "go:norace":
+ return Norace
+ case "go:nosplit":
+ return Nosplit | NoCheckPtr // implies NoCheckPtr (see #34972)
+ case "go:noinline":
+ return Noinline
+ case "go:nocheckptr":
+ return NoCheckPtr
+ case "go:systemstack":
+ return Systemstack
+ case "go:nowritebarrier":
+ return Nowritebarrier
+ case "go:nowritebarrierrec":
+ return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
+ case "go:yeswritebarrierrec":
+ return Yeswritebarrierrec
+ case "go:cgo_unsafe_args":
+ return CgoUnsafeArgs | NoCheckPtr // implies NoCheckPtr (see #34968)
+ case "go:uintptrescapes":
+ // For the next function declared in the file
+ // any uintptr arguments may be pointer values
+ // converted to uintptr. This directive
+ // ensures that the referenced allocated
+ // object, if any, is retained and not moved
+ // until the call completes, even though from
+ // the types alone it would appear that the
+ // object is no longer needed during the
+ // call. The conversion to uintptr must appear
+ // in the argument list.
+ // Used in syscall/dll_windows.go.
+ return UintptrEscapes
+ case "go:notinheap":
+ return NotInHeap
+ }
+ return 0
+}
+
+// pragcgo is called concurrently if files are parsed concurrently.
+func (p *noder) pragcgo(pos syntax.Pos, text string) {
+ f := pragmaFields(text)
+
+ verb := strings.TrimPrefix(f[0], "go:")
+ f[0] = verb
+
+ switch verb {
+ case "cgo_export_static", "cgo_export_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf(`usage: //go:%s local [remote]`, verb)})
+ return
+ }
+ case "cgo_import_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]):
+ f[3] = strings.Trim(f[3], `"`)
+ if objabi.GOOS == "aix" && f[3] != "" {
+ // On AIX, the library pattern must be "lib.a/object.o"
+ // or "lib.a/libname.so.X".
+ n := strings.Split(f[3], "/")
+ if len(n) != 2 || !strings.HasSuffix(n[0], ".a") || (!strings.HasSuffix(n[1], ".o") && !strings.Contains(n[1], ".so.")) {
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`})
+ return
+ }
+ }
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["library"]]`})
+ return
+ }
+ case "cgo_import_static":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_static local`})
+ return
+ }
+ case "cgo_dynamic_linker":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_dynamic_linker "path"`})
+ return
+ }
+ case "cgo_ldflag":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_ldflag "arg"`})
+ return
+ }
+ default:
+ return
+ }
+ p.pragcgobuf = append(p.pragcgobuf, f)
+}
+
+// pragmaFields is similar to strings.FieldsFunc(s, isSpace)
+// but does not split when inside double quoted regions and always
+// splits before the start and after the end of a double quoted region.
+// pragmaFields does not recognize escaped quotes. If a quote in s is not
+// closed, the part after the opening quote will not be returned as a field.
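+//
+// For example, pragmaFields(`"1 2 " 3 " 4 5"`) returns
+// [`"1 2 "`, `3`, `" 4 5"`] (see TestPragmaFields).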
+func pragmaFields(s string) []string {
+ var a []string
+ inQuote := false
+ fieldStart := -1 // Set to -1 when looking for start of field.
+ for i, c := range s {
+ switch {
+ case c == '"':
+ if inQuote {
+ inQuote = false
+ a = append(a, s[fieldStart:i+1])
+ fieldStart = -1
+ } else {
+ inQuote = true
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ }
+ fieldStart = i
+ }
+ case !inQuote && isSpace(c):
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ fieldStart = -1
+ }
+ default:
+ if fieldStart == -1 {
+ fieldStart = i
+ }
+ }
+ }
+ if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string.
+ a = append(a, s[fieldStart:])
+ }
+ return a
+}
diff --git a/src/cmd/compile/internal/gc/lex_test.go b/src/cmd/compile/internal/gc/lex_test.go
new file mode 100644
index 0000000..b2081a1
--- /dev/null
+++ b/src/cmd/compile/internal/gc/lex_test.go
@@ -0,0 +1,121 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/syntax"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+func eq(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestPragmaFields(t *testing.T) {
+ var tests = []struct {
+ in string
+ want []string
+ }{
+ {"", []string{}},
+ {" \t ", []string{}},
+ {`""""`, []string{`""`, `""`}},
+ {" a'b'c ", []string{"a'b'c"}},
+ {"1 2 3 4", []string{"1", "2", "3", "4"}},
+ {"\n☺\t☹\n", []string{"☺", "☹"}},
+ {`"1 2 " 3 " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}},
+ {`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}},
+ {`12"34"`, []string{`12`, `"34"`}},
+ {`12"34 `, []string{`12`}},
+ }
+
+ for _, tt := range tests {
+ got := pragmaFields(tt.in)
+ if !eq(got, tt.want) {
+ t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want)
+ continue
+ }
+ }
+}
+
+func TestPragcgo(t *testing.T) {
+ type testStruct struct {
+ in string
+ want []string
+ }
+
+ var tests = []testStruct{
+ {`go:cgo_export_dynamic local`, []string{`cgo_export_dynamic`, `local`}},
+ {`go:cgo_export_dynamic local remote`, []string{`cgo_export_dynamic`, `local`, `remote`}},
+ {`go:cgo_export_dynamic local' remote'`, []string{`cgo_export_dynamic`, `local'`, `remote'`}},
+ {`go:cgo_export_static local`, []string{`cgo_export_static`, `local`}},
+ {`go:cgo_export_static local remote`, []string{`cgo_export_static`, `local`, `remote`}},
+ {`go:cgo_export_static local' remote'`, []string{`cgo_export_static`, `local'`, `remote'`}},
+ {`go:cgo_import_dynamic local`, []string{`cgo_import_dynamic`, `local`}},
+ {`go:cgo_import_dynamic local remote`, []string{`cgo_import_dynamic`, `local`, `remote`}},
+ {`go:cgo_import_static local`, []string{`cgo_import_static`, `local`}},
+ {`go:cgo_import_static local'`, []string{`cgo_import_static`, `local'`}},
+ {`go:cgo_dynamic_linker "/path/"`, []string{`cgo_dynamic_linker`, `/path/`}},
+ {`go:cgo_dynamic_linker "/p ath/"`, []string{`cgo_dynamic_linker`, `/p ath/`}},
+ {`go:cgo_ldflag "arg"`, []string{`cgo_ldflag`, `arg`}},
+ {`go:cgo_ldflag "a rg"`, []string{`cgo_ldflag`, `a rg`}},
+ }
+
+ if runtime.GOOS != "aix" {
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "library"`, []string{`cgo_import_dynamic`, `local`, `remote`, `library`}},
+ {`go:cgo_import_dynamic local' remote' "lib rary"`, []string{`cgo_import_dynamic`, `local'`, `remote'`, `lib rary`}},
+ }...)
+ } else {
+ // cgo_import_dynamic with a library is slightly different on AIX
+ // as the library field must follow the pattern [libc.a/object.o].
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "lib.a/obj.o"`, []string{`cgo_import_dynamic`, `local`, `remote`, `lib.a/obj.o`}},
+ // This test must fail.
+ {`go:cgo_import_dynamic local' remote' "library"`, []string{`<unknown position>: usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`}},
+ }...)
+
+ }
+
+ var p noder
+ var nopos syntax.Pos
+ for _, tt := range tests {
+
+ p.err = make(chan syntax.Error)
+ gotch := make(chan [][]string, 1)
+ go func() {
+ p.pragcgobuf = nil
+ p.pragcgo(nopos, tt.in)
+ if p.pragcgobuf != nil {
+ gotch <- p.pragcgobuf
+ }
+ }()
+
+ select {
+ case e := <-p.err:
+ want := tt.want[0]
+ if e.Error() != want {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, e, want)
+ continue
+ }
+ case got := <-gotch:
+ want := [][]string{tt.want}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, want)
+ continue
+ }
+ }
+
+ }
+}
diff --git a/src/cmd/compile/internal/gc/logic_test.go b/src/cmd/compile/internal/gc/logic_test.go
new file mode 100644
index 0000000..78d2dd2
--- /dev/null
+++ b/src/cmd/compile/internal/gc/logic_test.go
@@ -0,0 +1,289 @@
+package gc
+
+import "testing"
+
+// Tests to make sure logic simplification rules are correct.
+
+func TestLogic64(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int64{-1 << 63, 1<<63 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int64) int64 { return 0 }
+ id := func(x int64) int64 { return x }
+ or := func(x, y int64) int64 { return x | y }
+ and := func(x, y int64) int64 { return x & y }
+ y := func(x, y int64) int64 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int64) int64
+ golden func(int64) int64
+ }{
+ {"x|x", func(x int64) int64 { return x | x }, id},
+ {"x|0", func(x int64) int64 { return x | 0 }, id},
+ {"x|-1", func(x int64) int64 { return x | -1 }, func(x int64) int64 { return -1 }},
+ {"x&x", func(x int64) int64 { return x & x }, id},
+ {"x&0", func(x int64) int64 { return x & 0 }, zero},
+ {"x&-1", func(x int64) int64 { return x & -1 }, id},
+ {"x^x", func(x int64) int64 { return x ^ x }, zero},
+ {"x^0", func(x int64) int64 { return x ^ 0 }, id},
+ {"x^-1", func(x int64) int64 { return x ^ -1 }, func(x int64) int64 { return ^x }},
+ {"x+0", func(x int64) int64 { return x + 0 }, id},
+ {"x-x", func(x int64) int64 { return x - x }, zero},
+ {"x*0", func(x int64) int64 { return x * 0 }, zero},
+ {"^^x", func(x int64) int64 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int64, int64) int64
+ golden func(int64, int64) int64
+ }{
+ {"x|(x|y)", func(x, y int64) int64 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int64) int64 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int64) int64 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int64) int64 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int64) int64 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int64) int64 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int64) int64 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int64) int64 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int64) int64 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int64) int64 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int64) int64 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int64) int64 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int64) int64 { return -(y - x) }, func(x, y int64) int64 { return x - y }},
+ {"(x+y)-x", func(x, y int64) int64 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int64) int64 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic32(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int32{-1 << 31, 1<<31 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int32) int32 { return 0 }
+ id := func(x int32) int32 { return x }
+ or := func(x, y int32) int32 { return x | y }
+ and := func(x, y int32) int32 { return x & y }
+ y := func(x, y int32) int32 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int32) int32
+ golden func(int32) int32
+ }{
+ {"x|x", func(x int32) int32 { return x | x }, id},
+ {"x|0", func(x int32) int32 { return x | 0 }, id},
+ {"x|-1", func(x int32) int32 { return x | -1 }, func(x int32) int32 { return -1 }},
+ {"x&x", func(x int32) int32 { return x & x }, id},
+ {"x&0", func(x int32) int32 { return x & 0 }, zero},
+ {"x&-1", func(x int32) int32 { return x & -1 }, id},
+ {"x^x", func(x int32) int32 { return x ^ x }, zero},
+ {"x^0", func(x int32) int32 { return x ^ 0 }, id},
+ {"x^-1", func(x int32) int32 { return x ^ -1 }, func(x int32) int32 { return ^x }},
+ {"x+0", func(x int32) int32 { return x + 0 }, id},
+ {"x-x", func(x int32) int32 { return x - x }, zero},
+ {"x*0", func(x int32) int32 { return x * 0 }, zero},
+ {"^^x", func(x int32) int32 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int32, int32) int32
+ golden func(int32, int32) int32
+ }{
+ {"x|(x|y)", func(x, y int32) int32 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int32) int32 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int32) int32 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int32) int32 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int32) int32 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int32) int32 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int32) int32 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int32) int32 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int32) int32 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int32) int32 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int32) int32 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int32) int32 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int32) int32 { return -(y - x) }, func(x, y int32) int32 { return x - y }},
+ {"(x+y)-x", func(x, y int32) int32 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int32) int32 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic16(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int16{-1 << 15, 1<<15 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int16) int16 { return 0 }
+ id := func(x int16) int16 { return x }
+ or := func(x, y int16) int16 { return x | y }
+ and := func(x, y int16) int16 { return x & y }
+ y := func(x, y int16) int16 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int16) int16
+ golden func(int16) int16
+ }{
+ {"x|x", func(x int16) int16 { return x | x }, id},
+ {"x|0", func(x int16) int16 { return x | 0 }, id},
+ {"x|-1", func(x int16) int16 { return x | -1 }, func(x int16) int16 { return -1 }},
+ {"x&x", func(x int16) int16 { return x & x }, id},
+ {"x&0", func(x int16) int16 { return x & 0 }, zero},
+ {"x&-1", func(x int16) int16 { return x & -1 }, id},
+ {"x^x", func(x int16) int16 { return x ^ x }, zero},
+ {"x^0", func(x int16) int16 { return x ^ 0 }, id},
+ {"x^-1", func(x int16) int16 { return x ^ -1 }, func(x int16) int16 { return ^x }},
+ {"x+0", func(x int16) int16 { return x + 0 }, id},
+ {"x-x", func(x int16) int16 { return x - x }, zero},
+ {"x*0", func(x int16) int16 { return x * 0 }, zero},
+ {"^^x", func(x int16) int16 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int16, int16) int16
+ golden func(int16, int16) int16
+ }{
+ {"x|(x|y)", func(x, y int16) int16 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int16) int16 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int16) int16 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int16) int16 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int16) int16 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int16) int16 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int16) int16 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int16) int16 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int16) int16 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int16) int16 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int16) int16 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int16) int16 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int16) int16 { return -(y - x) }, func(x, y int16) int16 { return x - y }},
+ {"(x+y)-x", func(x, y int16) int16 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int16) int16 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic8(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int8{-1 << 7, 1<<7 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int8) int8 { return 0 }
+ id := func(x int8) int8 { return x }
+ or := func(x, y int8) int8 { return x | y }
+ and := func(x, y int8) int8 { return x & y }
+ y := func(x, y int8) int8 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int8) int8
+ golden func(int8) int8
+ }{
+ {"x|x", func(x int8) int8 { return x | x }, id},
+ {"x|0", func(x int8) int8 { return x | 0 }, id},
+ {"x|-1", func(x int8) int8 { return x | -1 }, func(x int8) int8 { return -1 }},
+ {"x&x", func(x int8) int8 { return x & x }, id},
+ {"x&0", func(x int8) int8 { return x & 0 }, zero},
+ {"x&-1", func(x int8) int8 { return x & -1 }, id},
+ {"x^x", func(x int8) int8 { return x ^ x }, zero},
+ {"x^0", func(x int8) int8 { return x ^ 0 }, id},
+ {"x^-1", func(x int8) int8 { return x ^ -1 }, func(x int8) int8 { return ^x }},
+ {"x+0", func(x int8) int8 { return x + 0 }, id},
+ {"x-x", func(x int8) int8 { return x - x }, zero},
+ {"x*0", func(x int8) int8 { return x * 0 }, zero},
+ {"^^x", func(x int8) int8 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int8, int8) int8
+ golden func(int8, int8) int8
+ }{
+ {"x|(x|y)", func(x, y int8) int8 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int8) int8 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int8) int8 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int8) int8 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int8) int8 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int8) int8 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int8) int8 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int8) int8 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int8) int8 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int8) int8 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int8) int8 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int8) int8 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int8) int8 { return -(y - x) }, func(x, y int8) int8 { return x - y }},
+ {"(x+y)-x", func(x, y int8) int8 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int8) int8 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
new file mode 100644
index 0000000..a6963a3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/main.go
@@ -0,0 +1,1610 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package gc
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+ "cmd/internal/dwarf"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "flag"
+ "fmt"
+ "internal/goversion"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ buildid string
+ spectre string
+ spectreIndex bool
+)
+
+var (
+ Debug_append int
+ Debug_checkptr int
+ Debug_closure int
+ Debug_compilelater int
+ debug_dclstack int
+ Debug_dumpptrs int
+ Debug_libfuzzer int
+ Debug_panic int
+ Debug_slice int
+ Debug_vlog bool
+ Debug_wb int
+ Debug_pctab string
+ Debug_locationlist int
+ Debug_typecheckinl int
+ Debug_gendwarfinl int
+ Debug_softfloat int
+ Debug_defer int
+)
+
+// Debug arguments.
+// These can be specified with the -d flag, as in "-d nil"
+// to set the debug_checknil variable.
+// Multiple options can be comma-separated.
+// Each option accepts an optional argument, as in "gcprog=2".
+var debugtab = []struct {
+ name string
+ help string
+ val interface{} // must be *int or *string
+}{
+ {"append", "print information about append compilation", &Debug_append},
+ {"checkptr", "instrument unsafe pointer conversions", &Debug_checkptr},
+ {"closure", "print information about closure compilation", &Debug_closure},
+ {"compilelater", "compile functions as late as possible", &Debug_compilelater},
+ {"disablenil", "disable nil checks", &disable_checknil},
+ {"dclstack", "run internal dclstack check", &debug_dclstack},
+ {"dumpptrs", "show Node pointer values in Dump/dumplist output", &Debug_dumpptrs},
+ {"gcprog", "print dump of GC programs", &Debug_gcprog},
+ {"libfuzzer", "coverage instrumentation for libfuzzer", &Debug_libfuzzer},
+ {"nil", "print information about nil checks", &Debug_checknil},
+ {"panic", "do not hide any compiler panic", &Debug_panic},
+ {"slice", "print information about slice compilation", &Debug_slice},
+ {"typeassert", "print information about type assertion inlining", &Debug_typeassert},
+ {"wb", "print information about write barriers", &Debug_wb},
+ {"export", "print export data", &Debug_export},
+ {"pctab", "print named pc-value table", &Debug_pctab},
+ {"locationlists", "print information about DWARF location list creation", &Debug_locationlist},
+ {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
+ {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
+ {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
+ {"defer", "print information about defer compilation", &Debug_defer},
+ {"fieldtrack", "enable fieldtracking", &objabi.Fieldtrack_enabled},
+}
+
+const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
+
+<key> is one of:
+
+`
+
+const debugHelpFooter = `
+<value> is key-specific.
+
+Key "checkptr" supports values:
+ "0": instrumentation disabled
+ "1": conversions involving unsafe.Pointer are instrumented
+ "2": conversions to unsafe.Pointer force heap allocation
+
+Key "pctab" supports values:
+ "pctospadj", "pctofile", "pctoline", "pctoinline", "pctopcdata"
+`
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+ objabi.Flagprint(os.Stderr)
+ Exit(2)
+}
+
+func hidePanic() {
+ if Debug_panic == 0 && nsavederrors+nerrors > 0 {
+ // If we've already complained about things
+ // in the program, don't bother complaining
+ // about a panic too; let the user clean up
+ // the code and try again.
+ if err := recover(); err != nil {
+ errorexit()
+ }
+ }
+}
+
+// supportsDynlink reports whether or not the code generator for the given
+// architecture supports the -shared and -dynlink flags.
+func supportsDynlink(arch *sys.Arch) bool {
+ return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X)
+}
+
+// timing data for compiler phases
+var timings Timings
+var benchfile string
+
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
+// Main parses flags and Go source files specified in the command-line
+// arguments, type-checks the parsed Go package, compiles functions to machine
+// code, and finally writes the compiled package definition to disk.
+func Main(archInit func(*Arch)) {
+ timings.Start("fe", "init")
+
+ defer hidePanic()
+
+ archInit(&thearch)
+
+ Ctxt = obj.Linknew(thearch.LinkArch)
+ Ctxt.DiagFunc = yyerror
+ Ctxt.DiagFlush = flusherrors
+ Ctxt.Bso = bufio.NewWriter(os.Stdout)
+
+ // UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
+ // on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag
+ // to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
+ // See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
+ Ctxt.UseBASEntries = Ctxt.Headtype != objabi.Hdarwin
+
+ localpkg = types.NewPkg("", "")
+ localpkg.Prefix = "\"\""
+
+ // We won't know localpkg's height until after import
+ // processing. In the meantime, set it to MaxPkgHeight to ensure
+ // height comparisons at least work until then.
+ localpkg.Height = types.MaxPkgHeight
+
+ // pseudo-package, for scoping
+ builtinpkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+ builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+ // pseudo-package, accessed by import "unsafe"
+ unsafepkg = types.NewPkg("unsafe", "unsafe")
+
+ // Pseudo-package that contains the compiler's builtin
+ // declarations for package runtime. These are declared in a
+ // separate package to avoid conflicts with package runtime's
+ // actual declarations, which may differ intentionally but
+ // insignificantly.
+ Runtimepkg = types.NewPkg("go.runtime", "runtime")
+ Runtimepkg.Prefix = "runtime"
+
+ // pseudo-packages used in symbol tables
+ itabpkg = types.NewPkg("go.itab", "go.itab")
+ itabpkg.Prefix = "go.itab" // not go%2eitab
+
+ itablinkpkg = types.NewPkg("go.itablink", "go.itablink")
+ itablinkpkg.Prefix = "go.itablink" // not go%2eitablink
+
+ trackpkg = types.NewPkg("go.track", "go.track")
+ trackpkg.Prefix = "go.track" // not go%2etrack
+
+ // pseudo-package used for map zero values
+ mappkg = types.NewPkg("go.map", "go.map")
+ mappkg.Prefix = "go.map"
+
+ // pseudo-package used for methods with anonymous receivers
+ gopkg = types.NewPkg("go", "")
+
+ Wasm := objabi.GOARCH == "wasm"
+
+ // Whether the limit for stack-allocated objects is much smaller than normal.
+ // This can be helpful for diagnosing certain causes of GC latency. See #27732.
+ smallFrames := false
+ jsonLogOpt := ""
+
+ flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
+ flag.BoolVar(&compiling_std, "std", false, "compiling standard library")
+ flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
+
+ objabi.Flagcount("%", "debug non-static initializers", &Debug.P)
+ objabi.Flagcount("B", "disable bounds checking", &Debug.B)
+ objabi.Flagcount("C", "disable printing of columns in error messages", &Debug.C)
+ objabi.Flagcount("E", "debug symbol export", &Debug.E)
+ objabi.Flagcount("K", "debug missing line numbers", &Debug.K)
+ objabi.Flagcount("L", "show full file names in error messages", &Debug.L)
+ objabi.Flagcount("N", "disable optimizations", &Debug.N)
+ objabi.Flagcount("S", "print assembly listing", &Debug.S)
+ objabi.Flagcount("W", "debug parse tree after type checking", &Debug.W)
+ objabi.Flagcount("e", "no limit on number of errors reported", &Debug.e)
+ objabi.Flagcount("h", "halt on error", &Debug.h)
+ objabi.Flagcount("j", "debug runtime-initialized variables", &Debug.j)
+ objabi.Flagcount("l", "disable inlining", &Debug.l)
+ objabi.Flagcount("m", "print optimization decisions", &Debug.m)
+ objabi.Flagcount("r", "debug generated wrappers", &Debug.r)
+ objabi.Flagcount("w", "debug type checking", &Debug.w)
+
+ objabi.Flagfn1("I", "add `directory` to import search path", addidir)
+ objabi.AddVersionFlag() // -V
+ flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
+ flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
+ flag.IntVar(&nBackendWorkers, "c", 1, "concurrency during compilation, 1 means no concurrency")
+ flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
+ flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help")
+ flag.BoolVar(&flagDWARF, "dwarf", !Wasm, "generate DWARF symbols")
+ flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode")
+ flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records")
+ objabi.Flagfn1("embedcfg", "read go:embed configuration from `file`", readEmbedCfg)
+ objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
+ objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg)
+ flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
+ flag.StringVar(&flag_lang, "lang", "", "release to compile for")
+ flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
+ objabi.Flagcount("live", "debug liveness analysis", &debuglive)
+ if sys.MSanSupported(objabi.GOOS, objabi.GOARCH) {
+ flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
+ }
+ flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
+ flag.StringVar(&outfile, "o", "", "write output to `file`")
+ flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
+ flag.BoolVar(&writearchive, "pack", false, "write to file.a instead of file.o")
+ if sys.RaceDetectorSupported(objabi.GOOS, objabi.GOARCH) {
+ flag.BoolVar(&flag_race, "race", false, "enable race detector")
+ }
+ flag.StringVar(&spectre, "spectre", spectre, "enable spectre mitigations in `list` (all, index, ret)")
+ if enableTrace {
+ flag.BoolVar(&trace, "t", false, "trace type-checking")
+ }
+ flag.StringVar(&pathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
+ flag.BoolVar(&Debug_vlog, "v", false, "increase debug verbosity")
+ flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
+ var flag_shared bool
+ var flag_dynlink bool
+ if supportsDynlink(thearch.LinkArch.Arch) {
+ flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
+ flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
+ flag.BoolVar(&Ctxt.Flag_linkshared, "linkshared", false, "generate code that will be linked against Go shared libraries")
+ }
+ flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
+ flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
+ flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
+ var goversion string
+ flag.StringVar(&goversion, "goversion", "", "required version of the runtime")
+ var symabisPath string
+ flag.StringVar(&symabisPath, "symabis", "", "read symbol ABIs from `file`")
+ flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`")
+ flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
+ flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
+ flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
+ flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects")
+ flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF")
+ flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging")
+
+ objabi.Flagparse(usage)
+
+ Ctxt.Pkgpath = myimportpath
+
+ for _, f := range strings.Split(spectre, ",") {
+ f = strings.TrimSpace(f)
+ switch f {
+ default:
+ log.Fatalf("unknown setting -spectre=%s", f)
+ case "":
+ // nothing
+ case "all":
+ spectreIndex = true
+ Ctxt.Retpoline = true
+ case "index":
+ spectreIndex = true
+ case "ret":
+ Ctxt.Retpoline = true
+ }
+ }
+
+ if spectreIndex {
+ switch objabi.GOARCH {
+ case "amd64":
+ // ok
+ default:
+ log.Fatalf("GOARCH=%s does not support -spectre=index", objabi.GOARCH)
+ }
+ }
+
+ // Record flags that affect the build result. (And don't
+ // record flags that don't, since that would cause spurious
+ // changes in the binary.)
+ recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
+
+ if smallFrames {
+ maxStackVarSize = 128 * 1024
+ maxImplicitStackVarSize = 16 * 1024
+ }
+
+ Ctxt.Flag_shared = flag_dynlink || flag_shared
+ Ctxt.Flag_dynlink = flag_dynlink
+ Ctxt.Flag_optimize = Debug.N == 0
+
+ Ctxt.Debugasm = Debug.S
+ Ctxt.Debugvlog = Debug_vlog
+ if flagDWARF {
+ Ctxt.DebugInfo = debuginfo
+ Ctxt.GenAbstractFunc = genAbstractFunc
+ Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt)
+ } else {
+		// turn off DWARF inline info generation if not emitting DWARF at all
+ genDwarfInline = 0
+ Ctxt.Flag_locationlists = false
+ }
+
+ if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" {
+ usage()
+ }
+
+ if goversion != "" && goversion != runtime.Version() {
+ fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), goversion)
+ Exit(2)
+ }
+
+ checkLang()
+
+ if symabisPath != "" {
+ readSymABIs(symabisPath, myimportpath)
+ }
+
+ thearch.LinkArch.Init(Ctxt)
+
+ if outfile == "" {
+ p := flag.Arg(0)
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if runtime.GOOS == "windows" {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if writearchive {
+ suffix = ".a"
+ }
+ outfile = p + suffix
+ }
+
+ startProfile()
+
+ if flag_race && flag_msan {
+ log.Fatal("cannot use both -race and -msan")
+ }
+ if flag_race || flag_msan {
+ // -race and -msan imply -d=checkptr for now.
+ Debug_checkptr = 1
+ }
+ if ispkgin(omit_pkgs) {
+ flag_race = false
+ flag_msan = false
+ }
+ if flag_race {
+ racepkg = types.NewPkg("runtime/race", "")
+ }
+ if flag_msan {
+ msanpkg = types.NewPkg("runtime/msan", "")
+ }
+ if flag_race || flag_msan {
+ instrumenting = true
+ }
+
+ if compiling_runtime && Debug.N != 0 {
+ log.Fatal("cannot disable optimizations while compiling runtime")
+ }
+ if nBackendWorkers < 1 {
+ log.Fatalf("-c must be at least 1, got %d", nBackendWorkers)
+ }
+ if nBackendWorkers > 1 && !concurrentBackendAllowed() {
+ log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+ }
+ if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name)
+ }
+
+ // parse -d argument
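+	// The argument is a comma-separated list of name[=value] settings,
+	// for example (illustrative): -d=checkptr=2,ssa/prove/debug=2.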
+ if debugstr != "" {
+ Split:
+ for _, name := range strings.Split(debugstr, ",") {
+ if name == "" {
+ continue
+ }
+ // display help about the -d option itself and quit
+ if name == "help" {
+ fmt.Print(debugHelpHeader)
+ maxLen := len("ssa/help")
+ for _, t := range debugtab {
+ if len(t.name) > maxLen {
+ maxLen = len(t.name)
+ }
+ }
+ for _, t := range debugtab {
+ fmt.Printf("\t%-*s\t%s\n", maxLen, t.name, t.help)
+ }
+ // ssa options have their own help
+ fmt.Printf("\t%-*s\t%s\n", maxLen, "ssa/help", "print help about SSA debugging")
+ fmt.Print(debugHelpFooter)
+ os.Exit(0)
+ }
+ val, valstring, haveInt := 1, "", true
+ if i := strings.IndexAny(name, "=:"); i >= 0 {
+ var err error
+ name, valstring = name[:i], name[i+1:]
+ val, err = strconv.Atoi(valstring)
+ if err != nil {
+ val, haveInt = 1, false
+ }
+ }
+ for _, t := range debugtab {
+ if t.name != name {
+ continue
+ }
+ switch vp := t.val.(type) {
+ case nil:
+ // Ignore
+ case *string:
+ *vp = valstring
+ case *int:
+ if !haveInt {
+ log.Fatalf("invalid debug value %v", name)
+ }
+ *vp = val
+ default:
+ panic("bad debugtab type")
+ }
+ continue Split
+ }
+ // special case for ssa for now
+ if strings.HasPrefix(name, "ssa/") {
+ // expect form ssa/phase/flag
+ // e.g. -d=ssa/generic_cse/time
+ // _ in phase name also matches space
+ phase := name[4:]
+ flag := "debug" // default flag is debug
+ if i := strings.Index(phase, "/"); i >= 0 {
+ flag = phase[i+1:]
+ phase = phase[:i]
+ }
+ err := ssa.PhaseOption(phase, flag, val, valstring)
+ if err != "" {
+ log.Fatalf(err)
+ }
+ continue Split
+ }
+ log.Fatalf("unknown debug key -d %s\n", name)
+ }
+ }
+
+ if compiling_runtime {
+ // Runtime can't use -d=checkptr, at least not yet.
+ Debug_checkptr = 0
+
+ // Fuzzing the runtime isn't interesting either.
+ Debug_libfuzzer = 0
+ }
+
+ // set via a -d flag
+ Ctxt.Debugpcln = Debug_pctab
+ if flagDWARF {
+ dwarf.EnableLogging(Debug_gendwarfinl != 0)
+ }
+
+ if Debug_softfloat != 0 {
+ thearch.SoftFloat = true
+ }
+
+ // enable inlining. for now:
+ // default: inlining on. (Debug.l == 1)
+ // -l: inlining off (Debug.l == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Debug.l > 1)
+ if Debug.l <= 1 {
+ Debug.l = 1 - Debug.l
+ }
+
+ if jsonLogOpt != "" { // parse version,destination from json logging optimization.
+ logopt.LogJsonOption(jsonLogOpt)
+ }
+
+ ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
+ if ssaDump != "" {
+ if strings.HasSuffix(ssaDump, "+") {
+ ssaDump = ssaDump[:len(ssaDump)-1]
+ ssaDumpStdout = true
+ }
+ spl := strings.Split(ssaDump, ":")
+ if len(spl) > 1 {
+ ssaDump = spl[0]
+ ssaDumpCFG = spl[1]
+ }
+ }
+
+ trackScopes = flagDWARF
+
+ Widthptr = thearch.LinkArch.PtrSize
+ Widthreg = thearch.LinkArch.RegSize
+
+ // initialize types package
+ // (we need to do this to break dependencies that otherwise
+ // would lead to import cycles)
+ types.Widthptr = Widthptr
+ types.Dowidth = dowidth
+ types.Fatalf = Fatalf
+ types.Sconv = func(s *types.Sym, flag, mode int) string {
+ return sconv(s, FmtFlag(flag), fmtMode(mode))
+ }
+ types.Tconv = func(t *types.Type, flag, mode int) string {
+ return tconv(t, FmtFlag(flag), fmtMode(mode))
+ }
+ types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
+ symFormat(sym, s, verb, fmtMode(mode))
+ }
+ types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
+ typeFormat(t, s, verb, fmtMode(mode))
+ }
+ types.TypeLinkSym = func(t *types.Type) *obj.LSym {
+ return typenamesym(t).Linksym()
+ }
+ types.FmtLeft = int(FmtLeft)
+ types.FmtUnsigned = int(FmtUnsigned)
+ types.FErr = int(FErr)
+ types.Ctxt = Ctxt
+
+ initUniverse()
+
+ dclcontext = PEXTERN
+ nerrors = 0
+
+ autogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
+
+ timings.Start("fe", "loadsys")
+ loadsys()
+
+ timings.Start("fe", "parse")
+ lines := parseFiles(flag.Args())
+ timings.Stop()
+ timings.AddEvent(int64(lines), "lines")
+
+ finishUniverse()
+
+ recordPackageName()
+
+ typecheckok = true
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ //
+ // We also defer type alias declarations until phase 2
+ // to avoid cycles like #18640.
+ // TODO(gri) Remove this again once we have a fix for #25838.
+
+ // Don't use range--typecheck can add closures to xtop.
+ timings.Start("fe", "typecheck", "top1")
+ for i := 0; i < len(xtop); i++ {
+ n := xtop[i]
+ if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias()) {
+ xtop[i] = typecheck(n, ctxStmt)
+ }
+ }
+
+ // Phase 2: Variable assignments.
+ // To check interface assignments, depends on phase 1.
+
+ // Don't use range--typecheck can add closures to xtop.
+ timings.Start("fe", "typecheck", "top2")
+ for i := 0; i < len(xtop); i++ {
+ n := xtop[i]
+ if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias() {
+ xtop[i] = typecheck(n, ctxStmt)
+ }
+ }
+
+ // Phase 3: Type check function bodies.
+ // Don't use range--typecheck can add closures to xtop.
+ timings.Start("fe", "typecheck", "func")
+ var fcount int64
+ for i := 0; i < len(xtop); i++ {
+ n := xtop[i]
+ if n.Op == ODCLFUNC {
+ Curfn = n
+ decldepth = 1
+ saveerrors()
+ typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
+ checkreturn(Curfn)
+ if nerrors != 0 {
+ Curfn.Nbody.Set(nil) // type errors; do not compile
+ }
+ // Now that we've checked whether n terminates,
+ // we can eliminate some obviously dead code.
+ deadcode(Curfn)
+ fcount++
+ }
+ }
+ // With all types checked, it's now safe to verify map keys. One single
+ // check past phase 9 isn't sufficient, as we may exit with other errors
+ // before then, thus skipping map key errors.
+ checkMapKeys()
+ timings.AddEvent(fcount, "funcs")
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+
+ fninit(xtop)
+
+ // Phase 4: Decide how to capture closed variables.
+ // This needs to run before escape analysis,
+ // because variables captured by value do not escape.
+ timings.Start("fe", "capturevars")
+ for _, n := range xtop {
+ if n.Op == ODCLFUNC && n.Func.Closure != nil {
+ Curfn = n
+ capturevars(n)
+ }
+ }
+ capturevarscomplete = true
+
+ Curfn = nil
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+
+ // Phase 5: Inlining
+ timings.Start("fe", "inlining")
+ if Debug_typecheckinl != 0 {
+ // Typecheck imported function bodies if Debug.l > 1,
+ // otherwise lazily when used or re-exported.
+ for _, n := range importlist {
+ if n.Func.Inl != nil {
+ saveerrors()
+ typecheckinl(n)
+ }
+ }
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+ }
+
+ if Debug.l != 0 {
+ // Find functions that can be inlined and clone them before walk expands them.
+ visitBottomUp(xtop, func(list []*Node, recursive bool) {
+ numfns := numNonClosures(list)
+ for _, n := range list {
+ if !recursive || numfns > 1 {
+ // We allow inlining if there is no
+ // recursion, or the recursion cycle is
+ // across more than one function.
+ caninl(n)
+ } else {
+ if Debug.m > 1 {
+ fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
+ }
+ }
+ inlcalls(n)
+ }
+ })
+ }
+
+ for _, n := range xtop {
+ if n.Op == ODCLFUNC {
+ devirtualize(n)
+ }
+ }
+ Curfn = nil
+
+ // Phase 6: Escape analysis.
+ // Required for moving heap allocations onto stack,
+ // which in turn is required by the closure implementation,
+ // which stores the addresses of stack variables into the closure.
+ // If the closure does not escape, it needs to be on the stack
+ // or else the stack copier will not update it.
+ // Large values are also moved off stack in escape analysis;
+ // because large values may contain pointers, it must happen early.
+ timings.Start("fe", "escapes")
+ escapes(xtop)
+
+ // Collect information for go:nowritebarrierrec
+ // checking. This must happen before transformclosure.
+ // We'll do the final check after write barriers are
+ // inserted.
+ if compiling_runtime {
+ nowritebarrierrecCheck = newNowritebarrierrecChecker()
+ }
+
+ // Phase 7: Transform closure bodies to properly reference captured variables.
+ // This needs to happen before walk, because closures must be transformed
+ // before walk reaches a call of a closure.
+ timings.Start("fe", "xclosures")
+ for _, n := range xtop {
+ if n.Op == ODCLFUNC && n.Func.Closure != nil {
+ Curfn = n
+ transformclosure(n)
+ }
+ }
+
+ // Prepare for SSA compilation.
+ // This must be before peekitabs, because peekitabs
+ // can trigger function compilation.
+ initssaconfig()
+
+ // Just before compilation, compile itabs found on
+ // the right side of OCONVIFACE so that methods
+ // can be de-virtualized during compilation.
+ Curfn = nil
+ peekitabs()
+
+ // Phase 8: Compile top level functions.
+ // Don't use range--walk can add functions to xtop.
+ timings.Start("be", "compilefuncs")
+ fcount = 0
+ for i := 0; i < len(xtop); i++ {
+ n := xtop[i]
+ if n.Op == ODCLFUNC {
+ funccompile(n)
+ fcount++
+ }
+ }
+ timings.AddEvent(fcount, "funcs")
+
+ compileFunctions()
+
+ if nowritebarrierrecCheck != nil {
+ // Write barriers are now known. Check the
+ // call graph.
+ nowritebarrierrecCheck.check()
+ nowritebarrierrecCheck = nil
+ }
+
+ // Finalize DWARF inline routine DIEs, then explicitly turn off
+ // DWARF inlining gen so as to avoid problems with generated
+ // method wrappers.
+ if Ctxt.DwFixups != nil {
+ Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0)
+ Ctxt.DwFixups = nil
+ genDwarfInline = 0
+ }
+
+ // Phase 9: Check external declarations.
+ timings.Start("be", "externaldcls")
+ for i, n := range externdcl {
+ if n.Op == ONAME {
+ externdcl[i] = typecheck(externdcl[i], ctxExpr)
+ }
+ }
+ // Check the map keys again, since we typechecked the external
+ // declarations.
+ checkMapKeys()
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ // Write object data to disk.
+ timings.Start("be", "dumpobj")
+ dumpdata()
+ Ctxt.NumberSyms()
+ dumpobj()
+ if asmhdr != "" {
+ dumpasmhdr()
+ }
+
+ // Check whether any of the functions we have compiled have gigantic stack frames.
+ sort.Slice(largeStackFrames, func(i, j int) bool {
+ return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+ })
+ for _, large := range largeStackFrames {
+ if large.callee != 0 {
+ yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+ } else {
+ yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+ }
+ }
+
+ if len(funcStack) != 0 {
+ Fatalf("funcStack is non-empty: %v", len(funcStack))
+ }
+ if len(compilequeue) != 0 {
+ Fatalf("%d uncompiled functions", len(compilequeue))
+ }
+
+ logopt.FlushLoggedOpts(Ctxt, myimportpath)
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ flusherrors()
+ timings.Stop()
+
+ if benchfile != "" {
+ if err := writebench(benchfile); err != nil {
+ log.Fatalf("cannot write benchmark data: %v", err)
+ }
+ }
+}
+
+// numNonClosures returns the number of functions in list which are not closures.
+func numNonClosures(list []*Node) int {
+ count := 0
+ for _, n := range list {
+ if n.Func.Closure == nil {
+ count++
+ }
+ }
+ return count
+}
+
+func writebench(filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "commit:", objabi.Version)
+ fmt.Fprintln(&buf, "goos:", runtime.GOOS)
+ fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
+ timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":")
+
+ n, err := f.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() {
+ panic("bad writer")
+ }
+
+ return f.Close()
+}
+
+var (
+ importMap = map[string]string{}
+ packageFile map[string]string // nil means not in use
+)
+
+func addImportMap(s string) {
+ if strings.Count(s, "=") != 1 {
+ log.Fatal("-importmap argument must be of the form source=actual")
+ }
+ i := strings.Index(s, "=")
+ source, actual := s[:i], s[i+1:]
+ if source == "" || actual == "" {
+ log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+ }
+ importMap[source] = actual
+}
+
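+// readImportCfg parses an import configuration file: a sequence of lines
+// of the form "importmap old=new" or "packagefile path=filename", with
+// blank lines and #-comments ignored. For example (illustrative):
+//
+//	importmap old.example/pkg=new.example/pkg
+//	packagefile fmt=/tmp/work/fmt.a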
+func readImportCfg(file string) {
+ packageFile = map[string]string{}
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-importcfg: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ var verb, args string
+ if i := strings.Index(line, " "); i < 0 {
+ verb = line
+ } else {
+ verb, args = line[:i], strings.TrimSpace(line[i+1:])
+ }
+ var before, after string
+ if i := strings.Index(args, "="); i >= 0 {
+ before, after = args[:i], args[i+1:]
+ }
+ switch verb {
+ default:
+ log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+ case "importmap":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+ }
+ importMap[before] = after
+ case "packagefile":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+ }
+ packageFile[before] = after
+ }
+ }
+}
+
+// symabiDefs and symabiRefs record the defined and referenced ABIs of
+// symbols required by non-Go code. These are keyed by link symbol
+// name, where the local package prefix is always `"".`
+var symabiDefs, symabiRefs map[string]obj.ABI
+
+// readSymABIs reads a symabis file that specifies definitions and
+// references of text symbols by ABI.
+//
+// The symabis format is a set of lines, where each line is a sequence
+// of whitespace-separated fields. The first field is a verb and is
+// either "def" for defining a symbol ABI or "ref" for referencing a
+// symbol using an ABI. For both "def" and "ref", the second field is
+// the symbol name and the third field is the ABI name, as one of the
+// named cmd/internal/obj.ABI constants.
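+//
+// For example (illustrative):
+//
+//	def "".add ABI0
+//	ref "".callback ABIInternal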
+func readSymABIs(file, myimportpath string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-symabis: %v", err)
+ }
+
+ symabiDefs = make(map[string]obj.ABI)
+ symabiRefs = make(map[string]obj.ABI)
+
+ localPrefix := ""
+ if myimportpath != "" {
+ // Symbols in this package may be written either as
+ // "".X or with the package's import path already in
+ // the symbol.
+ localPrefix = objabi.PathToPrefix(myimportpath) + "."
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ switch parts[0] {
+ case "def", "ref":
+ // Parse line.
+ if len(parts) != 3 {
+ log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
+ }
+ sym, abistr := parts[1], parts[2]
+ abi, valid := obj.ParseABI(abistr)
+ if !valid {
+ log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
+ }
+
+ // If the symbol is already prefixed with
+ // myimportpath, rewrite it to start with ""
+ // so it matches the compiler's internal
+ // symbol names.
+ if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
+ sym = `"".` + sym[len(localPrefix):]
+ }
+
+ // Record for later.
+ if parts[0] == "def" {
+ symabiDefs[sym] = abi
+ } else {
+ symabiRefs[sym] = abi
+ }
+ default:
+ log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
+ }
+ }
+}
+
+func saveerrors() {
+ nsavederrors += nerrors
+ nerrors = 0
+}
+
+func arsize(b *bufio.Reader, name string) int {
+ var buf [ArhdrSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
+
+var idirs []string
+
+func addidir(dir string) {
+ if dir != "" {
+ idirs = append(idirs, dir)
+ }
+}
+
+func isDriveLetter(b byte) bool {
+ return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
+}
+
+// is this path a local name? begins with ./ or ../ or /
+func islocalname(name string) bool {
+ return strings.HasPrefix(name, "/") ||
+ runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
+ strings.HasPrefix(name, "./") || name == "." ||
+ strings.HasPrefix(name, "../") || name == ".."
+}
+
+func findpkg(name string) (file string, ok bool) {
+ if islocalname(name) {
+ if nolocalimports {
+ return "", false
+ }
+
+ if packageFile != nil {
+ file, ok = packageFile[name]
+ return file, ok
+ }
+
+		// try .a before .o. important for building libraries:
+		// if there is an array.o in the array.a library,
+		// we want to find all of array.a, not just array.o.
+ file = fmt.Sprintf("%s.a", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s.o", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ return "", false
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+ if q := path.Clean(name); q != name {
+ yyerror("non-canonical import path %q (should be %q)", name, q)
+ return "", false
+ }
+
+ if packageFile != nil {
+ file, ok = packageFile[name]
+ return file, ok
+ }
+
+ for _, dir := range idirs {
+ file = fmt.Sprintf("%s/%s.a", dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/%s.o", dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ if objabi.GOROOT != "" {
+ suffix := ""
+ suffixsep := ""
+ if flag_installsuffix != "" {
+ suffixsep = "_"
+ suffix = flag_installsuffix
+ } else if flag_race {
+ suffixsep = "_"
+ suffix = "race"
+ } else if flag_msan {
+ suffixsep = "_"
+ suffix = "msan"
+ }
+
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ return "", false
+}
+
+// loadsys loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make them visible to user code.
+func loadsys() {
+ types.Block = 1
+
+ inimport = true
+ typecheckok = true
+
+ typs := runtimeTypes()
+ for _, d := range &runtimeDecls {
+ sym := Runtimepkg.Lookup(d.name)
+ typ := typs[d.typ]
+ switch d.tag {
+ case funcTag:
+ importfunc(Runtimepkg, src.NoXPos, sym, typ)
+ case varTag:
+ importvar(Runtimepkg, src.NoXPos, sym, typ)
+ default:
+ Fatalf("unhandled declaration tag %v", d.tag)
+ }
+ }
+
+ typecheckok = false
+ inimport = false
+}
+
+// myheight tracks the local package's height based on packages
+// imported so far.
+var myheight int
+
+func importfile(f *Val) *types.Pkg {
+ path_, ok := f.U.(string)
+ if !ok {
+ yyerror("import path must be a string")
+ return nil
+ }
+
+ if len(path_) == 0 {
+ yyerror("import path is empty")
+ return nil
+ }
+
+ if isbadimport(path_, false) {
+ return nil
+ }
+
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if path_ == "main" {
+ yyerror("cannot import \"main\"")
+ errorexit()
+ }
+
+ if myimportpath != "" && path_ == myimportpath {
+ yyerror("import %q while compiling that package (import cycle)", path_)
+ errorexit()
+ }
+
+ if mapped, ok := importMap[path_]; ok {
+ path_ = mapped
+ }
+
+ if path_ == "unsafe" {
+ return unsafepkg
+ }
+
+ if islocalname(path_) {
+ if path_[0] == '/' {
+ yyerror("import path cannot be absolute path")
+ return nil
+ }
+
+ prefix := Ctxt.Pathname
+ if localimport != "" {
+ prefix = localimport
+ }
+ path_ = path.Join(prefix, path_)
+
+ if isbadimport(path_, true) {
+ return nil
+ }
+ }
+
+ file, found := findpkg(path_)
+ if !found {
+ yyerror("can't find import: %q", path_)
+ errorexit()
+ }
+
+ importpkg := types.NewPkg(path_, "")
+ if importpkg.Imported {
+ return importpkg
+ }
+
+ importpkg.Imported = true
+
+ imp, err := bio.Open(file)
+ if err != nil {
+ yyerror("can't open import: %q: %v", path_, err)
+ errorexit()
+ }
+ defer imp.Close()
+
+ // check object header
+ p, err := imp.ReadString('\n')
+ if err != nil {
+ yyerror("import %s: reading input: %v", file, err)
+ errorexit()
+ }
+
+ if p == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := arsize(imp.Reader, "__.PKGDEF")
+ if sz <= 0 {
+ yyerror("import %s: not a package file", file)
+ errorexit()
+ }
+ p, err = imp.ReadString('\n')
+ if err != nil {
+ yyerror("import %s: reading input: %v", file, err)
+ errorexit()
+ }
+ }
+
+ if !strings.HasPrefix(p, "go object ") {
+ yyerror("import %s: not a go object file: %s", file, p)
+ errorexit()
+ }
+ q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
+ if p[10:] != q {
+ yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ errorexit()
+ }
+
+ // process header lines
+ for {
+ p, err = imp.ReadString('\n')
+ if err != nil {
+ yyerror("import %s: reading input: %v", file, err)
+ errorexit()
+ }
+ if p == "\n" {
+ break // header ends with blank line
+ }
+ }
+
+	// In the import file, if we find:
+	// $$\n (textual format): not supported anymore
+	// $$B\n (binary format): import directly, then feed the lexer a dummy statement
+
+ // look for $$
+ var c byte
+ for {
+ c, err = imp.ReadByte()
+ if err != nil {
+ break
+ }
+ if c == '$' {
+ c, err = imp.ReadByte()
+ if c == '$' || err != nil {
+ break
+ }
+ }
+ }
+
+ // get character after $$
+ if err == nil {
+ c, _ = imp.ReadByte()
+ }
+
+ var fingerprint goobj.FingerprintType
+ switch c {
+ case '\n':
+ yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
+ return nil
+
+ case 'B':
+ if Debug_export != 0 {
+ fmt.Printf("importing %s (%s)\n", path_, file)
+ }
+ imp.ReadByte() // skip \n after $$B
+
+ c, err = imp.ReadByte()
+ if err != nil {
+ yyerror("import %s: reading input: %v", file, err)
+ errorexit()
+ }
+
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ if c != 'i' {
+ yyerror("import %s: unexpected package format byte: %v", file, c)
+ errorexit()
+ }
+ fingerprint = iimport(importpkg, imp)
+
+ default:
+ yyerror("no import in %q", path_)
+ errorexit()
+ }
+
+ // assume files move (get installed) so don't record the full path
+ if packageFile != nil {
+ // If using a packageFile map, assume path_ can be recorded directly.
+ Ctxt.AddImport(path_, fingerprint)
+ } else {
+ // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
+ }
+
+ if importpkg.Height >= myheight {
+ myheight = importpkg.Height + 1
+ }
+
+ return importpkg
+}
+
+func pkgnotused(lineno src.XPos, path string, name string) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if name == "" || elem == name {
+ yyerrorl(lineno, "imported and not used: %q", path)
+ } else {
+ yyerrorl(lineno, "imported and not used: %q as %s", path, name)
+ }
+}
+
+func mkpackage(pkgname string) {
+ if localpkg.Name == "" {
+ if pkgname == "_" {
+ yyerror("invalid package name _")
+ }
+ localpkg.Name = pkgname
+ } else {
+ if pkgname != localpkg.Name {
+ yyerror("package %s; expected %s", pkgname, localpkg.Name)
+ }
+ }
+}
+
+func clearImports() {
+ type importedPkg struct {
+ pos src.XPos
+ path string
+ name string
+ }
+ var unused []importedPkg
+
+ for _, s := range localpkg.Syms {
+ n := asNode(s.Def)
+ if n == nil {
+ continue
+ }
+ if n.Op == OPACK {
+ // throw away top-level package name left over
+ // from previous file.
+ // leave s->block set to cause redeclaration
+ // errors if a conflicting top-level name is
+ // introduced by a different file.
+ if !n.Name.Used() && nsyntaxerrors == 0 {
+ unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name})
+ }
+ s.Def = nil
+ continue
+ }
+ if IsAlias(s) {
+ // throw away top-level name left over
+ // from previous import . "x"
+ if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && nsyntaxerrors == 0 {
+ unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""})
+ n.Name.Pack.Name.SetUsed(true)
+ }
+ s.Def = nil
+ continue
+ }
+ }
+
+ sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
+ for _, pkg := range unused {
+ pkgnotused(pkg.pos, pkg.path, pkg.name)
+ }
+}
+
+func IsAlias(sym *types.Sym) bool {
+ return sym.Def != nil && asNode(sym.Def).Sym != sym
+}
+
+// By default, assume any debug flags are incompatible with concurrent
+// compilation. A few are safe and potentially in common use for
+// normal compiles, though; return true for those.
+func concurrentFlagOk() bool {
+ // Report whether any debug flag that would prevent concurrent
+ // compilation is set, by zeroing out the allowed ones and then
+ // checking if the resulting struct is zero.
+ d := Debug
+ d.B = 0 // disable bounds checking
+ d.C = 0 // disable printing of columns in error messages
+ d.e = 0 // no limit on errors; errors all come from non-concurrent code
+ d.N = 0 // disable optimizations
+ d.l = 0 // disable inlining
+ d.w = 0 // all printing happens before compilation
+ d.W = 0 // all printing happens before compilation
+ d.S = 0 // printing disassembly happens at the end (but see concurrentBackendAllowed below)
+
+ return d == DebugFlags{}
+}
+
+func concurrentBackendAllowed() bool {
+ if !concurrentFlagOk() {
+ return false
+ }
+
+ // Debug.S by itself is ok, because all printing occurs
+ // while writing the object file, and that is non-concurrent.
+ // Adding Debug_vlog, however, causes Debug.S to also print
+ // while flushing the plist, which happens concurrently.
+ if Debug_vlog || debugstr != "" || debuglive > 0 {
+ return false
+ }
+ // TODO: Test and delete this condition.
+ if objabi.Fieldtrack_enabled != 0 {
+ return false
+ }
+ // TODO: fix races and enable the following flags
+ if Ctxt.Flag_shared || Ctxt.Flag_dynlink || flag_race {
+ return false
+ }
+ return true
+}
+
+// recordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func recordFlags(flags ...string) {
+ if myimportpath == "" {
+ // We can't record the flags if we don't know what the
+ // package name is.
+ return
+ }
+
+ type BoolFlag interface {
+ IsBoolFlag() bool
+ }
+ type CountFlag interface {
+ IsCountFlag() bool
+ }
+ var cmd bytes.Buffer
+ for _, name := range flags {
+ f := flag.Lookup(name)
+ if f == nil {
+ continue
+ }
+ getter := f.Value.(flag.Getter)
+ if getter.String() == f.DefValue {
+ // Flag has default value, so omit it.
+ continue
+ }
+ if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+ val, ok := getter.Get().(bool)
+ if ok && val {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+ val, ok := getter.Get().(int)
+ if ok && val == 1 {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+ }
+
+ if cmd.Len() == 0 {
+ return
+ }
+ s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ Ctxt.Data = append(Ctxt.Data, s)
+ s.P = cmd.Bytes()[1:]
+}
+
+// recordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func recordPackageName() {
+ s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ Ctxt.Data = append(Ctxt.Data, s)
+ s.P = []byte(localpkg.Name)
+}
+
+// flag_lang is the language version we are compiling for, set by the -lang flag.
+var flag_lang string
+
+// currentLang returns the current language version.
+func currentLang() string {
+ return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
+var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+ major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// langSupported reports whether language version major.minor is
+// supported in a particular package.
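+// For example (illustrative), with -lang=go1.13 set, langSupported(1, 14, localpkg)
+// reports false, while langSupported(1, 12, localpkg) reports true.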
+func langSupported(major, minor int, pkg *types.Pkg) bool {
+ if pkg == nil {
+ // TODO(mdempsky): Set Pkg for local types earlier.
+ pkg = localpkg
+ }
+ if pkg != localpkg {
+ // Assume imported packages passed type-checking.
+ return true
+ }
+
+ if langWant.major == 0 && langWant.minor == 0 {
+ return true
+ }
+ return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// checkLang verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by langSupported.
+func checkLang() {
+ if flag_lang == "" {
+ return
+ }
+
+ var err error
+ langWant, err = parseLang(flag_lang)
+ if err != nil {
+ log.Fatalf("invalid value %q for -lang: %v", flag_lang, err)
+ }
+
+ if def := currentLang(); flag_lang != def {
+ defVers, err := parseLang(def)
+ if err != nil {
+ log.Fatalf("internal error parsing default lang %q: %v", def, err)
+ }
+ if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+ log.Fatalf("invalid value %q for -lang: max known version is %q", flag_lang, def)
+ }
+ }
+}
+
+// parseLang parses a -lang option into a lang.
+func parseLang(s string) (lang, error) {
+ matches := goVersionRE.FindStringSubmatch(s)
+ if matches == nil {
+ return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+ }
+ major, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return lang{}, err
+ }
+ minor, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return lang{}, err
+ }
+ return lang{major: major, minor: minor}, nil
+}
diff --git a/src/cmd/compile/internal/gc/mapfile_mmap.go b/src/cmd/compile/internal/gc/mapfile_mmap.go
new file mode 100644
index 0000000..9483688
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mapfile_mmap.go
@@ -0,0 +1,48 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package gc
+
+import (
+ "os"
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+// TODO(mdempsky): Is there a higher-level abstraction that still
+// works well for iimport?
+
+// mapFile returns length bytes from the file starting at the
+// specified offset as a string.
+func mapFile(f *os.File, offset, length int64) (string, error) {
+ // POSIX mmap: "The implementation may require that off is a
+ // multiple of the page size."
+ x := offset & int64(os.Getpagesize()-1)
+ offset -= x
+ length += x
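+	// For example (illustrative), with a 4096-byte page size and offset
+	// 4100, x is 4: we map from offset 4096 with 4 extra bytes of length
+	// and skip those leading bytes via buf[x:] below.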
+
+ buf, err := syscall.Mmap(int(f.Fd()), offset, int(length), syscall.PROT_READ, syscall.MAP_SHARED)
+ keepAlive(f)
+ if err != nil {
+ return "", err
+ }
+
+ buf = buf[x:]
+ pSlice := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+
+ var res string
+ pString := (*reflect.StringHeader)(unsafe.Pointer(&res))
+
+ pString.Data = pSlice.Data
+ pString.Len = pSlice.Len
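+	// The returned string aliases the mmap'd memory rather than copying
+	// it, so the mapping must stay valid for the string's lifetime.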
+
+ return res, nil
+}
+
+// keepAlive is a reimplementation of runtime.KeepAlive, which wasn't
+// added until Go 1.7, whereas we need to compile with Go 1.4.
+var keepAlive = func(interface{}) {}
diff --git a/src/cmd/compile/internal/gc/mapfile_read.go b/src/cmd/compile/internal/gc/mapfile_read.go
new file mode 100644
index 0000000..c6f68ed
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mapfile_read.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package gc
+
+import (
+ "io"
+ "os"
+)
+
+func mapFile(f *os.File, offset, length int64) (string, error) {
+ buf := make([]byte, length)
+ _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
diff --git a/src/cmd/compile/internal/gc/mkbuiltin.go b/src/cmd/compile/internal/gc/mkbuiltin.go
new file mode 100644
index 0000000..63d2a12
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mkbuiltin.go
@@ -0,0 +1,225 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Generate builtin.go from builtin/runtime.go.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var stdout = flag.Bool("stdout", false, "write to stdout instead of builtin.go")
+
+func main() {
+ flag.Parse()
+
+ var b bytes.Buffer
+ fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, "package gc")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, `import "cmd/compile/internal/types"`)
+
+ mkbuiltin(&b, "runtime")
+
+ out, err := format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ if *stdout {
+ _, err = os.Stdout.Write(out)
+ } else {
+ err = ioutil.WriteFile("builtin.go", out, 0666)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func mkbuiltin(w io.Writer, name string) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filepath.Join("builtin", name+".go"), nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var interner typeInterner
+
+ fmt.Fprintf(w, "var %sDecls = [...]struct { name string; tag int; typ int }{\n", name)
+ for _, decl := range f.Decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ if decl.Recv != nil {
+ log.Fatal("methods unsupported")
+ }
+ if decl.Body != nil {
+ log.Fatal("unexpected function body")
+ }
+ fmt.Fprintf(w, "{%q, funcTag, %d},\n", decl.Name.Name, interner.intern(decl.Type))
+ case *ast.GenDecl:
+ if decl.Tok == token.IMPORT {
+ if len(decl.Specs) != 1 || decl.Specs[0].(*ast.ImportSpec).Path.Value != "\"unsafe\"" {
+ log.Fatal("runtime cannot import other package")
+ }
+ continue
+ }
+ if decl.Tok != token.VAR {
+ log.Fatal("unhandled declaration kind", decl.Tok)
+ }
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Values) != 0 {
+ log.Fatal("unexpected values")
+ }
+ typ := interner.intern(spec.Type)
+ for _, name := range spec.Names {
+ fmt.Fprintf(w, "{%q, varTag, %d},\n", name.Name, typ)
+ }
+ }
+ default:
+ log.Fatal("unhandled decl type", decl)
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w)
+ fmt.Fprintf(w, "func %sTypes() []*types.Type {\n", name)
+ fmt.Fprintf(w, "var typs [%d]*types.Type\n", len(interner.typs))
+ for i, typ := range interner.typs {
+ fmt.Fprintf(w, "typs[%d] = %s\n", i, typ)
+ }
+ fmt.Fprintln(w, "return typs[:]")
+ fmt.Fprintln(w, "}")
+}
+
+// typeInterner maps Go type expressions to compiler code that
+// constructs the denoted type. It recognizes and reuses common
+// subtype expressions.
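+// For example (illustrative), the type *byte is interned as the code
+// fragment "types.NewPtr(typs[i])", where slot i holds "types.Bytetype".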
+type typeInterner struct {
+ typs []string
+ hash map[string]int
+}
+
+func (i *typeInterner) intern(t ast.Expr) int {
+ x := i.mktype(t)
+ v, ok := i.hash[x]
+ if !ok {
+ v = len(i.typs)
+ if i.hash == nil {
+ i.hash = make(map[string]int)
+ }
+ i.hash[x] = v
+ i.typs = append(i.typs, x)
+ }
+ return v
+}
+
+func (i *typeInterner) subtype(t ast.Expr) string {
+ return fmt.Sprintf("typs[%d]", i.intern(t))
+}
+
+func (i *typeInterner) mktype(t ast.Expr) string {
+ switch t := t.(type) {
+ case *ast.Ident:
+ switch t.Name {
+ case "byte":
+ return "types.Bytetype"
+ case "rune":
+ return "types.Runetype"
+ }
+ return fmt.Sprintf("types.Types[T%s]", strings.ToUpper(t.Name))
+ case *ast.SelectorExpr:
+ if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
+ log.Fatalf("unhandled type: %#v", t)
+ }
+ return "types.Types[TUNSAFEPTR]"
+
+ case *ast.ArrayType:
+ if t.Len == nil {
+ return fmt.Sprintf("types.NewSlice(%s)", i.subtype(t.Elt))
+ }
+ return fmt.Sprintf("types.NewArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
+ case *ast.ChanType:
+ dir := "types.Cboth"
+ switch t.Dir {
+ case ast.SEND:
+ dir = "types.Csend"
+ case ast.RECV:
+ dir = "types.Crecv"
+ }
+ return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
+ case *ast.FuncType:
+ return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
+ case *ast.InterfaceType:
+ if len(t.Methods.List) != 0 {
+ log.Fatal("non-empty interfaces unsupported")
+ }
+ return "types.Types[TINTER]"
+ case *ast.MapType:
+ return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
+ case *ast.StarExpr:
+ return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
+ case *ast.StructType:
+ return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true))
+
+ default:
+ log.Fatalf("unhandled type: %#v", t)
+ panic("unreachable")
+ }
+}
+
+func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
+ if fl == nil || len(fl.List) == 0 {
+ return "nil"
+ }
+ var res []string
+ for _, f := range fl.List {
+ typ := i.subtype(f.Type)
+ if len(f.Names) == 0 {
+ res = append(res, fmt.Sprintf("anonfield(%s)", typ))
+ } else {
+ for _, name := range f.Names {
+ if keepNames {
+ res = append(res, fmt.Sprintf("namedfield(%q, %s)", name.Name, typ))
+ } else {
+ res = append(res, fmt.Sprintf("anonfield(%s)", typ))
+ }
+ }
+ }
+ }
+ return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
+}
+
+func intconst(e ast.Expr) int64 {
+ switch e := e.(type) {
+ case *ast.BasicLit:
+ if e.Kind != token.INT {
+ log.Fatalf("expected INT, got %v", e.Kind)
+ }
+ x, err := strconv.ParseInt(e.Value, 0, 64)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return x
+ default:
+ log.Fatalf("unhandled expr: %#v", e)
+ panic("unreachable")
+ }
+}
diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go
new file mode 100644
index 0000000..401aef3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mpfloat.go
@@ -0,0 +1,357 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "math"
+ "math/big"
+)
+
+// implements float arithmetic
+
+const (
+ // Maximum size in bits for Mpints before signalling
+ // overflow and also mantissa precision for Mpflts.
+ Mpprec = 512
+ // Turn on for constant arithmetic debugging output.
+ Mpdebug = false
+)
+
+// Mpflt represents a floating-point constant.
+type Mpflt struct {
+ Val big.Float
+}
+
+// Mpcplx represents a complex constant.
+type Mpcplx struct {
+ Real Mpflt
+ Imag Mpflt
+}
+
+// Use newMpflt (not new(Mpflt)!) to get the correct default precision.
+func newMpflt() *Mpflt {
+ var a Mpflt
+ a.Val.SetPrec(Mpprec)
+ return &a
+}
+
+// Use newMpcmplx (not new(Mpcplx)!) to get the correct default precision.
+func newMpcmplx() *Mpcplx {
+ var a Mpcplx
+ a.Real = *newMpflt()
+ a.Imag = *newMpflt()
+ return &a
+}
+
+func (a *Mpflt) SetInt(b *Mpint) {
+ if b.checkOverflow(0) {
+ // sign doesn't really matter but copy anyway
+ a.Val.SetInf(b.Val.Sign() < 0)
+ return
+ }
+ a.Val.SetInt(&b.Val)
+}
+
+func (a *Mpflt) Set(b *Mpflt) {
+ a.Val.Set(&b.Val)
+}
+
+func (a *Mpflt) Add(b *Mpflt) {
+ if Mpdebug {
+ fmt.Printf("\n%v + %v", a, b)
+ }
+
+ a.Val.Add(&a.Val, &b.Val)
+
+ if Mpdebug {
+ fmt.Printf(" = %v\n\n", a)
+ }
+}
+
+func (a *Mpflt) AddFloat64(c float64) {
+ var b Mpflt
+
+ b.SetFloat64(c)
+ a.Add(&b)
+}
+
+func (a *Mpflt) Sub(b *Mpflt) {
+ if Mpdebug {
+ fmt.Printf("\n%v - %v", a, b)
+ }
+
+ a.Val.Sub(&a.Val, &b.Val)
+
+ if Mpdebug {
+ fmt.Printf(" = %v\n\n", a)
+ }
+}
+
+func (a *Mpflt) Mul(b *Mpflt) {
+ if Mpdebug {
+ fmt.Printf("%v\n * %v\n", a, b)
+ }
+
+ a.Val.Mul(&a.Val, &b.Val)
+
+ if Mpdebug {
+ fmt.Printf(" = %v\n\n", a)
+ }
+}
+
+func (a *Mpflt) MulFloat64(c float64) {
+ var b Mpflt
+
+ b.SetFloat64(c)
+ a.Mul(&b)
+}
+
+func (a *Mpflt) Quo(b *Mpflt) {
+ if Mpdebug {
+ fmt.Printf("%v\n / %v\n", a, b)
+ }
+
+ a.Val.Quo(&a.Val, &b.Val)
+
+ if Mpdebug {
+ fmt.Printf(" = %v\n\n", a)
+ }
+}
+
+func (a *Mpflt) Cmp(b *Mpflt) int {
+ return a.Val.Cmp(&b.Val)
+}
+
+func (a *Mpflt) CmpFloat64(c float64) int {
+ if c == 0 {
+ return a.Val.Sign() // common case shortcut
+ }
+ return a.Val.Cmp(big.NewFloat(c))
+}
+
+func (a *Mpflt) Float64() float64 {
+ x, _ := a.Val.Float64()
+
+ // check for overflow
+ if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpflt Float64")
+ }
+
+ return x + 0 // avoid -0 (should not be needed, but be conservative)
+}
+
+func (a *Mpflt) Float32() float64 {
+ x32, _ := a.Val.Float32()
+ x := float64(x32)
+
+ // check for overflow
+ if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpflt Float32")
+ }
+
+ return x + 0 // avoid -0 (should not be needed, but be conservative)
+}
+
+func (a *Mpflt) SetFloat64(c float64) {
+ if Mpdebug {
+ fmt.Printf("\nconst %g", c)
+ }
+
+ // convert -0 to 0
+ if c == 0 {
+ c = 0
+ }
+ a.Val.SetFloat64(c)
+
+ if Mpdebug {
+ fmt.Printf(" = %v\n", a)
+ }
+}
+
+func (a *Mpflt) Neg() {
+ // avoid -0
+ if a.Val.Sign() != 0 {
+ a.Val.Neg(&a.Val)
+ }
+}
+
+func (a *Mpflt) SetString(as string) {
+ f, _, err := a.Val.Parse(as, 0)
+ if err != nil {
+ yyerror("malformed constant: %s (%v)", as, err)
+ a.Val.SetFloat64(0)
+ return
+ }
+
+ if f.IsInf() {
+ yyerror("constant too large: %s", as)
+ a.Val.SetFloat64(0)
+ return
+ }
+
+ // -0 becomes 0
+ if f.Sign() == 0 && f.Signbit() {
+ a.Val.SetFloat64(0)
+ }
+}
+
+func (f *Mpflt) String() string {
+ return f.Val.Text('b', 0)
+}
+
+func (fvp *Mpflt) GoString() string {
+ // determine sign
+ sign := ""
+ f := &fvp.Val
+ if f.Sign() < 0 {
+ sign = "-"
+ f = new(big.Float).Abs(f)
+ }
+
+ // Don't try to convert infinities (will not terminate).
+ if f.IsInf() {
+ return sign + "Inf"
+ }
+
+ // Use exact fmt formatting if in float64 range (common case):
+ // proceed if f doesn't underflow to 0 or overflow to inf.
+ if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
+ return fmt.Sprintf("%s%.6g", sign, x)
+ }
+
+	// Out of float64 range. Do an approximate manual conversion to
+	// decimal to avoid precise but possibly slow Float formatting.
+ // f = mant * 2**exp
+ var mant big.Float
+ exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
+
+ // approximate float64 mantissa m and decimal exponent d
+ // f ~ m * 10**d
+ m, _ := mant.Float64() // 0.5 <= m < 1.0
+ d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
+
+ // adjust m for truncated (integer) decimal exponent e
+ e := int64(d)
+ m *= math.Pow(10, d-float64(e))
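+	// For example (illustrative), exp = 2000 gives d ≈ 602.06,
+	// so e = 602 and m is scaled by 10**0.06 ≈ 1.15.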
+
+ // ensure 1 <= m < 10
+ switch {
+ case m < 1-0.5e-6:
+ // The %.6g format below rounds m to 5 digits after the
+ // decimal point. Make sure that m*10 < 10 even after
+		// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
+ m *= 10
+ e--
+ case m >= 10:
+ m /= 10
+ e++
+ }
+
+ return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
+}
+
+// complex multiply v *= rv
+// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
+func (v *Mpcplx) Mul(rv *Mpcplx) {
+ var ac, ad, bc, bd Mpflt
+
+ ac.Set(&v.Real)
+ ac.Mul(&rv.Real) // ac
+
+ bd.Set(&v.Imag)
+ bd.Mul(&rv.Imag) // bd
+
+ bc.Set(&v.Imag)
+ bc.Mul(&rv.Real) // bc
+
+ ad.Set(&v.Real)
+ ad.Mul(&rv.Imag) // ad
+
+ v.Real.Set(&ac)
+ v.Real.Sub(&bd) // ac-bd
+
+ v.Imag.Set(&bc)
+ v.Imag.Add(&ad) // bc+ad
+}
+
+// complex divide v /= rv
+// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
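+// For example, (1+2i)/(3+4i) = ((1*3+2*4) + (2*3-1*4)i)/(3*3+4*4) = (11+2i)/25.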
+func (v *Mpcplx) Div(rv *Mpcplx) bool {
+ if rv.Real.CmpFloat64(0) == 0 && rv.Imag.CmpFloat64(0) == 0 {
+ return false
+ }
+
+ var ac, ad, bc, bd, cc_plus_dd Mpflt
+
+ cc_plus_dd.Set(&rv.Real)
+ cc_plus_dd.Mul(&rv.Real) // cc
+
+ ac.Set(&rv.Imag)
+ ac.Mul(&rv.Imag) // dd
+ cc_plus_dd.Add(&ac) // cc+dd
+
+ // We already checked that c and d are not both zero, but we can't
+ // assume that c²+d² != 0 follows, because for tiny values of c
+	// and/or d, c²+d² can underflow to zero. Check that c²+d² is
+	// nonzero and return if it's not.
+ if cc_plus_dd.CmpFloat64(0) == 0 {
+ return false
+ }
+
+ ac.Set(&v.Real)
+ ac.Mul(&rv.Real) // ac
+
+ bd.Set(&v.Imag)
+ bd.Mul(&rv.Imag) // bd
+
+ bc.Set(&v.Imag)
+ bc.Mul(&rv.Real) // bc
+
+ ad.Set(&v.Real)
+ ad.Mul(&rv.Imag) // ad
+
+ v.Real.Set(&ac)
+ v.Real.Add(&bd) // ac+bd
+ v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
+
+ v.Imag.Set(&bc)
+ v.Imag.Sub(&ad) // bc-ad
+	v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
+
+ return true
+}
+
+func (v *Mpcplx) String() string {
+ return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
+}
+
+func (v *Mpcplx) GoString() string {
+ var re string
+ sre := v.Real.CmpFloat64(0)
+ if sre != 0 {
+ re = v.Real.GoString()
+ }
+
+ var im string
+ sim := v.Imag.CmpFloat64(0)
+ if sim != 0 {
+ im = v.Imag.GoString()
+ }
+
+ switch {
+ case sre == 0 && sim == 0:
+ return "0"
+ case sre == 0:
+ return im + "i"
+ case sim == 0:
+ return re
+ case sim < 0:
+ return fmt.Sprintf("(%s%si)", re, im)
+ default:
+ return fmt.Sprintf("(%s+%si)", re, im)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go
new file mode 100644
index 0000000..340350b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/mpint.go
@@ -0,0 +1,304 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "math/big"
+)
+
+// implements integer arithmetic
+
+// Mpint represents an integer constant.
+type Mpint struct {
+ Val big.Int
+ Ovf bool // set if Val overflowed compiler limit (sticky)
+ Rune bool // set if syntax indicates default type rune
+}
+
+func (a *Mpint) SetOverflow() {
+ a.Val.SetUint64(1) // avoid spurious div-zero errors
+ a.Ovf = true
+}
+
+func (a *Mpint) checkOverflow(extra int) bool {
+ // We don't need to be precise here, any reasonable upper limit would do.
+ // For now, use existing limit so we pass all the tests unchanged.
+ if a.Val.BitLen()+extra > Mpprec {
+ a.SetOverflow()
+ }
+ return a.Ovf
+}
+
+func (a *Mpint) Set(b *Mpint) {
+ a.Val.Set(&b.Val)
+}
+
+func (a *Mpint) SetFloat(b *Mpflt) bool {
+ // avoid converting huge floating-point numbers to integers
+ // (2*Mpprec is large enough to permit all tests to pass)
+ if b.Val.MantExp(nil) > 2*Mpprec {
+ a.SetOverflow()
+ return false
+ }
+
+ if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
+ return true
+ }
+
+ const delta = 16 // a reasonably small number of bits > 0
+ var t big.Float
+ t.SetPrec(Mpprec - delta)
+
+ // try rounding down a little
+ t.SetMode(big.ToZero)
+ t.Set(&b.Val)
+ if _, acc := t.Int(&a.Val); acc == big.Exact {
+ return true
+ }
+
+ // try rounding up a little
+ t.SetMode(big.AwayFromZero)
+ t.Set(&b.Val)
+ if _, acc := t.Int(&a.Val); acc == big.Exact {
+ return true
+ }
+
+ a.Ovf = false
+ return false
+}
+
+func (a *Mpint) Add(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Add")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Add(&a.Val, &b.Val)
+
+ if a.checkOverflow(0) {
+ yyerror("constant addition overflow")
+ }
+}
+
+func (a *Mpint) Sub(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Sub")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Sub(&a.Val, &b.Val)
+
+ if a.checkOverflow(0) {
+ yyerror("constant subtraction overflow")
+ }
+}
+
+func (a *Mpint) Mul(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Mul")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Mul(&a.Val, &b.Val)
+
+ if a.checkOverflow(0) {
+ yyerror("constant multiplication overflow")
+ }
+}
+
+func (a *Mpint) Quo(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Quo")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Quo(&a.Val, &b.Val)
+
+ if a.checkOverflow(0) {
+ // can only happen for div-0 which should be checked elsewhere
+ yyerror("constant division overflow")
+ }
+}
+
+func (a *Mpint) Rem(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Rem")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Rem(&a.Val, &b.Val)
+
+ if a.checkOverflow(0) {
+ // should never happen
+ yyerror("constant modulo overflow")
+ }
+}
+
+func (a *Mpint) Or(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Or")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Or(&a.Val, &b.Val)
+}
+
+func (a *Mpint) And(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint And")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.And(&a.Val, &b.Val)
+}
+
+func (a *Mpint) AndNot(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint AndNot")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.AndNot(&a.Val, &b.Val)
+}
+
+func (a *Mpint) Xor(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Xor")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ a.Val.Xor(&a.Val, &b.Val)
+}
+
+func (a *Mpint) Lsh(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Lsh")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ s := b.Int64()
+ if s < 0 || s >= Mpprec {
+ msg := "shift count too large"
+ if s < 0 {
+ msg = "invalid negative shift count"
+ }
+ yyerror("%s: %d", msg, s)
+ a.SetInt64(0)
+ return
+ }
+
+ if a.checkOverflow(int(s)) {
+ yyerror("constant shift overflow")
+ return
+ }
+ a.Val.Lsh(&a.Val, uint(s))
+}
+
+func (a *Mpint) Rsh(b *Mpint) {
+ if a.Ovf || b.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("ovf in Mpint Rsh")
+ }
+ a.SetOverflow()
+ return
+ }
+
+ s := b.Int64()
+ if s < 0 {
+ yyerror("invalid negative shift count: %d", s)
+ if a.Val.Sign() < 0 {
+ a.SetInt64(-1)
+ } else {
+ a.SetInt64(0)
+ }
+ return
+ }
+
+ a.Val.Rsh(&a.Val, uint(s))
+}
+
+func (a *Mpint) Cmp(b *Mpint) int {
+ return a.Val.Cmp(&b.Val)
+}
+
+func (a *Mpint) CmpInt64(c int64) int {
+ if c == 0 {
+ return a.Val.Sign() // common case shortcut
+ }
+ return a.Val.Cmp(big.NewInt(c))
+}
+
+func (a *Mpint) Neg() {
+ a.Val.Neg(&a.Val)
+}
+
+func (a *Mpint) Int64() int64 {
+ if a.Ovf {
+ if nsavederrors+nerrors == 0 {
+ Fatalf("constant overflow")
+ }
+ return 0
+ }
+
+ return a.Val.Int64()
+}
+
+func (a *Mpint) SetInt64(c int64) {
+ a.Val.SetInt64(c)
+}
+
+func (a *Mpint) SetString(as string) {
+ _, ok := a.Val.SetString(as, 0)
+ if !ok {
+ // The lexer checks for correct syntax of the literal
+ // and reports detailed errors. Thus SetString should
+ // never fail (in theory it might run out of memory,
+ // but that wouldn't be reported as an error here).
+ Fatalf("malformed integer constant: %s", as)
+ return
+ }
+ if a.checkOverflow(0) {
+ yyerror("constant too large: %s", as)
+ }
+}
+
+func (a *Mpint) GoString() string {
+ return a.Val.String()
+}
+
+func (a *Mpint) String() string {
+ return fmt.Sprintf("%#x", &a.Val)
+}
diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go
new file mode 100644
index 0000000..7494c3e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/noder.go
@@ -0,0 +1,1756 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// parseFiles concurrently parses files into *syntax.File structures.
+// Each declaration in every *syntax.File is converted to a syntax tree
+// and its root represented by *Node is appended to xtop.
+// Returns the total count of parsed lines.
+func parseFiles(filenames []string) uint {
+ noders := make([]*noder, 0, len(filenames))
+ // Limit the number of simultaneously open files.
+ sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
+
+ for _, filename := range filenames {
+ p := &noder{
+ basemap: make(map[*syntax.PosBase]*src.PosBase),
+ err: make(chan syntax.Error),
+ }
+ noders = append(noders, p)
+
+ go func(filename string) {
+ sem <- struct{}{}
+ defer func() { <-sem }()
+ defer close(p.err)
+ base := syntax.NewFileBase(filename)
+
+ f, err := os.Open(filename)
+ if err != nil {
+ p.error(syntax.Error{Msg: err.Error()})
+ return
+ }
+ defer f.Close()
+
+ p.file, _ = syntax.Parse(base, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error
+ }(filename)
+ }
+
+ var lines uint
+ for _, p := range noders {
+ for e := range p.err {
+ p.yyerrorpos(e.Pos, "%s", e.Msg)
+ }
+
+ p.node()
+ lines += p.file.Lines
+ p.file = nil // release memory
+
+ if nsyntaxerrors != 0 {
+ errorexit()
+ }
+ // Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
+ testdclstack()
+ }
+
+ localpkg.Height = myheight
+
+ return lines
+}
+
+// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase.
+func (p *noder) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
+ // fast path: most likely PosBase hasn't changed
+ if p.basecache.last == b0 {
+ return p.basecache.base
+ }
+
+ b1, ok := p.basemap[b0]
+ if !ok {
+ fn := b0.Filename()
+ if b0.IsFileBase() {
+ b1 = src.NewFileBase(fn, absFilename(fn))
+ } else {
+ // line directive base
+ p0 := b0.Pos()
+ p0b := p0.Base()
+ if p0b == b0 {
+ panic("infinite recursion in makeSrcPosBase")
+ }
+ p1 := src.MakePos(p.makeSrcPosBase(p0b), p0.Line(), p0.Col())
+ b1 = src.NewLinePragmaBase(p1, fn, fileh(fn), b0.Line(), b0.Col())
+ }
+ p.basemap[b0] = b1
+ }
+
+ // update cache
+ p.basecache.last = b0
+ p.basecache.base = b1
+
+ return b1
+}
+
+func (p *noder) makeXPos(pos syntax.Pos) (_ src.XPos) {
+ return Ctxt.PosTable.XPos(src.MakePos(p.makeSrcPosBase(pos.Base()), pos.Line(), pos.Col()))
+}
+
+func (p *noder) yyerrorpos(pos syntax.Pos, format string, args ...interface{}) {
+ yyerrorl(p.makeXPos(pos), format, args...)
+}
+
+var pathPrefix string
+
+// TODO(gri) Can we eliminate fileh in favor of absFilename?
+func fileh(name string) string {
+ return objabi.AbsFile("", name, pathPrefix)
+}
+
+func absFilename(name string) string {
+ return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix)
+}
+
+// noder transforms package syntax's AST into a Node tree.
+type noder struct {
+ basemap map[*syntax.PosBase]*src.PosBase
+ basecache struct {
+ last *syntax.PosBase
+ base *src.PosBase
+ }
+
+ file *syntax.File
+ linknames []linkname
+ pragcgobuf [][]string
+ err chan syntax.Error
+ scope ScopeID
+ importedUnsafe bool
+ importedEmbed bool
+
+ // scopeVars is a stack tracking the number of variables declared in the
+ // current function at the moment each open scope was opened.
+ scopeVars []int
+
+ lastCloseScopePos syntax.Pos
+}
+
+func (p *noder) funcBody(fn *Node, block *syntax.BlockStmt) {
+ oldScope := p.scope
+ p.scope = 0
+ funchdr(fn)
+
+ if block != nil {
+ body := p.stmts(block.List)
+ if body == nil {
+ body = []*Node{nod(OEMPTY, nil, nil)}
+ }
+ fn.Nbody.Set(body)
+
+ lineno = p.makeXPos(block.Rbrace)
+ fn.Func.Endlineno = lineno
+ }
+
+ funcbody()
+ p.scope = oldScope
+}
+
+func (p *noder) openScope(pos syntax.Pos) {
+ types.Markdcl()
+
+ if trackScopes {
+ Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl))
+ p.scope = ScopeID(len(Curfn.Func.Parents))
+
+ p.markScope(pos)
+ }
+}
+
+func (p *noder) closeScope(pos syntax.Pos) {
+ p.lastCloseScopePos = pos
+ types.Popdcl()
+
+ if trackScopes {
+ scopeVars := p.scopeVars[len(p.scopeVars)-1]
+ p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
+ if scopeVars == len(Curfn.Func.Dcl) {
+ // no variables were declared in this scope, so we can retract it.
+
+ if int(p.scope) != len(Curfn.Func.Parents) {
+ Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
+ }
+
+ p.scope = Curfn.Func.Parents[p.scope-1]
+ Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1]
+
+ nmarks := len(Curfn.Func.Marks)
+ Curfn.Func.Marks[nmarks-1].Scope = p.scope
+ prevScope := ScopeID(0)
+ if nmarks >= 2 {
+ prevScope = Curfn.Func.Marks[nmarks-2].Scope
+ }
+ if Curfn.Func.Marks[nmarks-1].Scope == prevScope {
+ Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1]
+ }
+ return
+ }
+
+ p.scope = Curfn.Func.Parents[p.scope-1]
+
+ p.markScope(pos)
+ }
+}
+
+func (p *noder) markScope(pos syntax.Pos) {
+ xpos := p.makeXPos(pos)
+ if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos {
+ Curfn.Func.Marks[i-1].Scope = p.scope
+ } else {
+ Curfn.Func.Marks = append(Curfn.Func.Marks, Mark{xpos, p.scope})
+ }
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (p *noder) closeAnotherScope() {
+ p.closeScope(p.lastCloseScopePos)
+}
+
+// linkname records a //go:linkname directive.
+type linkname struct {
+ pos syntax.Pos
+ local string
+ remote string
+}
+
+func (p *noder) node() {
+ types.Block = 1
+ p.importedUnsafe = false
+ p.importedEmbed = false
+
+ p.setlineno(p.file.PkgName)
+ mkpackage(p.file.PkgName.Value)
+
+ if pragma, ok := p.file.Pragma.(*Pragma); ok {
+ pragma.Flag &^= GoBuildPragma
+ p.checkUnused(pragma)
+ }
+
+ xtop = append(xtop, p.decls(p.file.DeclList)...)
+
+ for _, n := range p.linknames {
+ if !p.importedUnsafe {
+ p.yyerrorpos(n.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+ s := lookup(n.local)
+ if n.remote != "" {
+ s.Linkname = n.remote
+ } else {
+ // Use the default object symbol name if the
+ // user didn't provide one.
+ if myimportpath == "" {
+ p.yyerrorpos(n.pos, "//go:linkname requires linkname argument or -p compiler flag")
+ } else {
+ s.Linkname = objabi.PathToPrefix(myimportpath) + "." + n.local
+ }
+ }
+ }
+
+ // The linker expects an ABI0 wrapper for all cgo-exported
+ // functions.
+ for _, prag := range p.pragcgobuf {
+ switch prag[0] {
+ case "cgo_export_static", "cgo_export_dynamic":
+ if symabiRefs == nil {
+ symabiRefs = make(map[string]obj.ABI)
+ }
+ symabiRefs[prag[1]] = obj.ABI0
+ }
+ }
+
+ pragcgobuf = append(pragcgobuf, p.pragcgobuf...)
+ lineno = src.NoXPos
+ clearImports()
+}
+
+func (p *noder) decls(decls []syntax.Decl) (l []*Node) {
+ var cs constState
+
+ for _, decl := range decls {
+ p.setlineno(decl)
+ switch decl := decl.(type) {
+ case *syntax.ImportDecl:
+ p.importDecl(decl)
+
+ case *syntax.VarDecl:
+ l = append(l, p.varDecl(decl)...)
+
+ case *syntax.ConstDecl:
+ l = append(l, p.constDecl(decl, &cs)...)
+
+ case *syntax.TypeDecl:
+ l = append(l, p.typeDecl(decl))
+
+ case *syntax.FuncDecl:
+ l = append(l, p.funcDecl(decl))
+
+ default:
+ panic("unhandled Decl")
+ }
+ }
+
+ return
+}
+
+func (p *noder) importDecl(imp *syntax.ImportDecl) {
+ if imp.Path.Bad {
+ return // avoid follow-on errors if there was a syntax error
+ }
+
+ if pragma, ok := imp.Pragma.(*Pragma); ok {
+ p.checkUnused(pragma)
+ }
+
+ val := p.basicLit(imp.Path)
+ ipkg := importfile(&val)
+ if ipkg == nil {
+ if nerrors == 0 {
+ Fatalf("phase error in import")
+ }
+ return
+ }
+
+ if ipkg == unsafepkg {
+ p.importedUnsafe = true
+ }
+ if ipkg.Path == "embed" {
+ p.importedEmbed = true
+ }
+
+ ipkg.Direct = true
+
+ var my *types.Sym
+ if imp.LocalPkgName != nil {
+ my = p.name(imp.LocalPkgName)
+ } else {
+ my = lookup(ipkg.Name)
+ }
+
+ pack := p.nod(imp, OPACK, nil, nil)
+ pack.Sym = my
+ pack.Name.Pkg = ipkg
+
+ switch my.Name {
+ case ".":
+ importdot(ipkg, pack)
+ return
+ case "init":
+ yyerrorl(pack.Pos, "cannot import package as init - init must be a func")
+ return
+ case "_":
+ return
+ }
+ if my.Def != nil {
+ redeclare(pack.Pos, my, "as imported package name")
+ }
+ my.Def = asTypesNode(pack)
+ my.Lastlineno = pack.Pos
+ my.Block = 1 // at top level
+}
+
+func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
+ names := p.declNames(decl.NameList)
+ typ := p.typeExprOrNil(decl.Type)
+
+ var exprs []*Node
+ if decl.Values != nil {
+ exprs = p.exprList(decl.Values)
+ }
+
+ if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if len(pragma.Embeds) > 0 {
+ if !p.importedEmbed {
+ // This check can't be done when building the list pragma.Embeds
+ // because that list is created before the noder starts walking over the file,
+ // so at that point it hasn't seen the imports.
+ // We're left to check now, just before applying the //go:embed lines.
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "//go:embed only allowed in Go files that import \"embed\"")
+ }
+ } else {
+ varEmbed(p, names, typ, exprs, pragma.Embeds)
+ }
+ pragma.Embeds = nil
+ }
+ p.checkUnused(pragma)
+ }
+
+ p.setlineno(decl)
+ return variter(names, typ, exprs)
+}
+
+// constState tracks state between constant specifiers within a
+// declaration group. This state is kept separate from noder so nested
+// constant declarations are handled correctly (e.g., issue 15550).
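+//
+// For illustration (a sketch): in
+//
+//	const (
+//		a = 1 << iota // iota == 0
+//		b             // iota == 1; typ and values reused from a's spec
+//		c             // iota == 2
+//	)
+//
+// a single constState spans all three specs, so b and c copy a's expression
+// (via treecopy in constDecl below) with the then-current iota.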
+type constState struct {
+ group *syntax.Group
+ typ *Node
+ values []*Node
+ iota int64
+}
+
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*Node {
+ if decl.Group == nil || decl.Group != cs.group {
+ *cs = constState{
+ group: decl.Group,
+ }
+ }
+
+ if pragma, ok := decl.Pragma.(*Pragma); ok {
+ p.checkUnused(pragma)
+ }
+
+ names := p.declNames(decl.NameList)
+ typ := p.typeExprOrNil(decl.Type)
+
+ var values []*Node
+ if decl.Values != nil {
+ values = p.exprList(decl.Values)
+ cs.typ, cs.values = typ, values
+ } else {
+ if typ != nil {
+ yyerror("const declaration cannot have type without expression")
+ }
+ typ, values = cs.typ, cs.values
+ }
+
+ nn := make([]*Node, 0, len(names))
+ for i, n := range names {
+ if i >= len(values) {
+ yyerror("missing value in const declaration")
+ break
+ }
+ v := values[i]
+ if decl.Values == nil {
+ v = treecopy(v, n.Pos)
+ }
+
+ n.Op = OLITERAL
+ declare(n, dclcontext)
+
+ n.Name.Param.Ntype = typ
+ n.Name.Defn = v
+ n.SetIota(cs.iota)
+
+ nn = append(nn, p.nod(decl, ODCLCONST, n, nil))
+ }
+
+ if len(values) > len(names) {
+ yyerror("extra expression in const declaration")
+ }
+
+ cs.iota++
+
+ return nn
+}
+
+func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
+ n := p.declName(decl.Name)
+ n.Op = OTYPE
+ declare(n, dclcontext)
+
+ // decl.Type may be nil but in that case we got a syntax error during parsing
+ typ := p.typeExprOrNil(decl.Type)
+
+ param := n.Name.Param
+ param.Ntype = typ
+ param.SetAlias(decl.Alias)
+ if pragma, ok := decl.Pragma.(*Pragma); ok {
+ if !decl.Alias {
+ param.SetPragma(pragma.Flag & TypePragmas)
+ pragma.Flag &^= TypePragmas
+ }
+ p.checkUnused(pragma)
+ }
+
+ nod := p.nod(decl, ODCLTYPE, n, nil)
+ if param.Alias() && !langSupported(1, 9, localpkg) {
+ yyerrorl(nod.Pos, "type aliases only supported as of -lang=go1.9")
+ }
+ return nod
+}
+
+func (p *noder) declNames(names []*syntax.Name) []*Node {
+ nodes := make([]*Node, 0, len(names))
+ for _, name := range names {
+ nodes = append(nodes, p.declName(name))
+ }
+ return nodes
+}
+
+func (p *noder) declName(name *syntax.Name) *Node {
+ n := dclname(p.name(name))
+ n.Pos = p.pos(name)
+ return n
+}
+
+func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
+ name := p.name(fun.Name)
+ t := p.signature(fun.Recv, fun.Type)
+ f := p.nod(fun, ODCLFUNC, nil, nil)
+
+ if fun.Recv == nil {
+ if name.Name == "init" {
+ name = renameinit()
+ if t.List.Len() > 0 || t.Rlist.Len() > 0 {
+ yyerrorl(f.Pos, "func init must have no arguments and no return values")
+ }
+ }
+
+ if localpkg.Name == "main" && name.Name == "main" {
+ if t.List.Len() > 0 || t.Rlist.Len() > 0 {
+ yyerrorl(f.Pos, "func main must have no arguments and no return values")
+ }
+ }
+ } else {
+ f.Func.Shortname = name
+ name = nblank.Sym // filled in by typecheckfunc
+ }
+
+ f.Func.Nname = newfuncnamel(p.pos(fun.Name), name)
+ f.Func.Nname.Name.Defn = f
+ f.Func.Nname.Name.Param.Ntype = t
+
+ if pragma, ok := fun.Pragma.(*Pragma); ok {
+ f.Func.Pragma = pragma.Flag & FuncPragmas
+ if pragma.Flag&Systemstack != 0 && pragma.Flag&Nosplit != 0 {
+ yyerrorl(f.Pos, "go:nosplit and go:systemstack cannot be combined")
+ }
+ pragma.Flag &^= FuncPragmas
+ p.checkUnused(pragma)
+ }
+
+ if fun.Recv == nil {
+ declare(f.Func.Nname, PFUNC)
+ }
+
+ p.funcBody(f, fun.Body)
+
+ if fun.Body != nil {
+ if f.Func.Pragma&Noescape != 0 {
+ yyerrorl(f.Pos, "can only use //go:noescape with external func implementations")
+ }
+ } else {
+ if pure_go || strings.HasPrefix(f.funcname(), "init.") {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ isLinknamed := false
+ for _, n := range p.linknames {
+ if f.funcname() == n.local {
+ isLinknamed = true
+ break
+ }
+ }
+ if !isLinknamed {
+ yyerrorl(f.Pos, "missing function body")
+ }
+ }
+ }
+
+ return f
+}
+
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
+ n := p.nod(typ, OTFUNC, nil, nil)
+ if recv != nil {
+ n.Left = p.param(recv, false, false)
+ }
+ n.List.Set(p.params(typ.ParamList, true))
+ n.Rlist.Set(p.params(typ.ResultList, false))
+ return n
+}
+
+func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
+ nodes := make([]*Node, 0, len(params))
+ for i, param := range params {
+ p.setlineno(param)
+ nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+ }
+ return nodes
+}
+
+func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node {
+ var name *types.Sym
+ if param.Name != nil {
+ name = p.name(param.Name)
+ }
+
+ typ := p.typeExpr(param.Type)
+ n := p.nodSym(param, ODCLFIELD, typ, name)
+
+ // rewrite ...T parameter
+ if typ.Op == ODDD {
+ if !dddOk {
+ // We mark these as syntax errors to get automatic elimination
+ // of multiple such errors per line (see yyerrorl in subr.go).
+ yyerror("syntax error: cannot use ... in receiver or result parameter list")
+ } else if !final {
+ if param.Name == nil {
+ yyerror("syntax error: cannot use ... with non-final parameter")
+ } else {
+ p.yyerrorpos(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
+ }
+ }
+ typ.Op = OTARRAY
+ typ.Right = typ.Left
+ typ.Left = nil
+ n.SetIsDDD(true)
+ if n.Left != nil {
+ n.Left.SetIsDDD(true)
+ }
+ }
+
+ return n
+}
+
+func (p *noder) exprList(expr syntax.Expr) []*Node {
+ if list, ok := expr.(*syntax.ListExpr); ok {
+ return p.exprs(list.ElemList)
+ }
+ return []*Node{p.expr(expr)}
+}
+
+func (p *noder) exprs(exprs []syntax.Expr) []*Node {
+ nodes := make([]*Node, 0, len(exprs))
+ for _, expr := range exprs {
+ nodes = append(nodes, p.expr(expr))
+ }
+ return nodes
+}
+
+func (p *noder) expr(expr syntax.Expr) *Node {
+ p.setlineno(expr)
+ switch expr := expr.(type) {
+ case nil, *syntax.BadExpr:
+ return nil
+ case *syntax.Name:
+ return p.mkname(expr)
+ case *syntax.BasicLit:
+ n := nodlit(p.basicLit(expr))
+ n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
+ return n
+ case *syntax.CompositeLit:
+ n := p.nod(expr, OCOMPLIT, nil, nil)
+ if expr.Type != nil {
+ n.Right = p.expr(expr.Type)
+ }
+ l := p.exprs(expr.ElemList)
+ for i, e := range l {
+ l[i] = p.wrapname(expr.ElemList[i], e)
+ }
+ n.List.Set(l)
+ lineno = p.makeXPos(expr.Rbrace)
+ return n
+ case *syntax.KeyValueExpr:
+ // use position of expr.Key rather than of expr (which has position of ':')
+ return p.nod(expr.Key, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+ case *syntax.FuncLit:
+ return p.funcLit(expr)
+ case *syntax.ParenExpr:
+ return p.nod(expr, OPAREN, p.expr(expr.X), nil)
+ case *syntax.SelectorExpr:
+ // parser.new_dotname
+ obj := p.expr(expr.X)
+ if obj.Op == OPACK {
+ obj.Name.SetUsed(true)
+ return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
+ }
+ n := nodSym(OXDOT, obj, p.name(expr.Sel))
+ n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
+ return n
+ case *syntax.IndexExpr:
+ return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index))
+ case *syntax.SliceExpr:
+ op := OSLICE
+ if expr.Full {
+ op = OSLICE3
+ }
+ n := p.nod(expr, op, p.expr(expr.X), nil)
+ var index [3]*Node
+ for i, x := range &expr.Index {
+ if x != nil {
+ index[i] = p.expr(x)
+ }
+ }
+ n.SetSliceBounds(index[0], index[1], index[2])
+ return n
+ case *syntax.AssertExpr:
+ return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
+ case *syntax.Operation:
+ if expr.Op == syntax.Add && expr.Y != nil {
+ return p.sum(expr)
+ }
+ x := p.expr(expr.X)
+ if expr.Y == nil {
+ return p.nod(expr, p.unOp(expr.Op), x, nil)
+ }
+ return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
+ case *syntax.CallExpr:
+ n := p.nod(expr, OCALL, p.expr(expr.Fun), nil)
+ n.List.Set(p.exprs(expr.ArgList))
+ n.SetIsDDD(expr.HasDots)
+ return n
+
+ case *syntax.ArrayType:
+ var len *Node
+ if expr.Len != nil {
+ len = p.expr(expr.Len)
+ } else {
+ len = p.nod(expr, ODDD, nil, nil)
+ }
+ return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem))
+ case *syntax.SliceType:
+ return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem))
+ case *syntax.DotsType:
+ return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil)
+ case *syntax.StructType:
+ return p.structType(expr)
+ case *syntax.InterfaceType:
+ return p.interfaceType(expr)
+ case *syntax.FuncType:
+ return p.signature(nil, expr)
+ case *syntax.MapType:
+ return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
+ case *syntax.ChanType:
+ n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
+ n.SetTChanDir(p.chanDir(expr.Dir))
+ return n
+
+ case *syntax.TypeSwitchGuard:
+ n := p.nod(expr, OTYPESW, nil, p.expr(expr.X))
+ if expr.Lhs != nil {
+ n.Left = p.declName(expr.Lhs)
+ if n.Left.isBlank() {
+ yyerror("invalid variable name %v in type switch", n.Left)
+ }
+ }
+ return n
+ }
+ panic("unhandled Expr")
+}
+
+// sum efficiently handles very large summation expressions (such as
+// in issue #16394). In particular, it avoids left recursion and
+// collapses string literals.
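+//
+// For illustration (a sketch of the result): applied to x + "a" + "b" + "c" + y,
+// sum produces OADD(OADD(x, "abc"), y); the three constant chunks are joined
+// into a single string literal node before the non-constant operand y is added.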
+func (p *noder) sum(x syntax.Expr) *Node {
+ // While we need to handle long sums with asymptotic
+ // efficiency, the vast majority of sums are very small: ~95%
+ // have only 2 or 3 operands, and ~99% of string literals are
+ // never concatenated.
+
+ adds := make([]*syntax.Operation, 0, 2)
+ for {
+ add, ok := x.(*syntax.Operation)
+ if !ok || add.Op != syntax.Add || add.Y == nil {
+ break
+ }
+ adds = append(adds, add)
+ x = add.X
+ }
+
+ // nstr is the current rightmost string literal in the
+ // summation (if any), and chunks holds its accumulated
+ // substrings.
+ //
+ // Consider the expression x + "a" + "b" + "c" + y. When we
+ // reach the string literal "a", we assign nstr to point to
+ // its corresponding Node and initialize chunks to {"a"}.
+ // Visiting the subsequent string literals "b" and "c", we
+ // simply append their values to chunks. Finally, when we
+ // reach the non-constant operand y, we'll join chunks to form
+ // "abc" and reassign the "a" string literal's value.
+ //
+ // N.B., we need to be careful about named string constants
+ // (indicated by Sym != nil) because 1) we can't modify their
+ // value, as doing so would affect other uses of the string
+ // constant, and 2) they may have types, which we need to
+ // handle correctly. For now, we avoid these problems by
+ // treating named string constants the same as non-constant
+ // operands.
+ var nstr *Node
+ chunks := make([]string, 0, 1)
+
+ n := p.expr(x)
+ if Isconst(n, CTSTR) && n.Sym == nil {
+ nstr = n
+ chunks = append(chunks, nstr.StringVal())
+ }
+
+ for i := len(adds) - 1; i >= 0; i-- {
+ add := adds[i]
+
+ r := p.expr(add.Y)
+ if Isconst(r, CTSTR) && r.Sym == nil {
+ if nstr != nil {
+ // Collapse r into nstr instead of adding to n.
+ chunks = append(chunks, r.StringVal())
+ continue
+ }
+
+ nstr = r
+ chunks = append(chunks, nstr.StringVal())
+ } else {
+ if len(chunks) > 1 {
+ nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ }
+ nstr = nil
+ chunks = chunks[:0]
+ }
+ n = p.nod(add, OADD, n, r)
+ }
+ if len(chunks) > 1 {
+ nstr.SetVal(Val{U: strings.Join(chunks, "")})
+ }
+
+ return n
+}
+
+func (p *noder) typeExpr(typ syntax.Expr) *Node {
+ // TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
+ return p.expr(typ)
+}
+
+func (p *noder) typeExprOrNil(typ syntax.Expr) *Node {
+ if typ != nil {
+ return p.expr(typ)
+ }
+ return nil
+}
+
+func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
+ switch dir {
+ case 0:
+ return types.Cboth
+ case syntax.SendOnly:
+ return types.Csend
+ case syntax.RecvOnly:
+ return types.Crecv
+ }
+ panic("unhandled ChanDir")
+}
+
+func (p *noder) structType(expr *syntax.StructType) *Node {
+ l := make([]*Node, 0, len(expr.FieldList))
+ for i, field := range expr.FieldList {
+ p.setlineno(field)
+ var n *Node
+ if field.Name == nil {
+ n = p.embedded(field.Type)
+ } else {
+ n = p.nodSym(field, ODCLFIELD, p.typeExpr(field.Type), p.name(field.Name))
+ }
+ if i < len(expr.TagList) && expr.TagList[i] != nil {
+ n.SetVal(p.basicLit(expr.TagList[i]))
+ }
+ l = append(l, n)
+ }
+
+ p.setlineno(expr)
+ n := p.nod(expr, OTSTRUCT, nil, nil)
+ n.List.Set(l)
+ return n
+}
+
+func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
+ l := make([]*Node, 0, len(expr.MethodList))
+ for _, method := range expr.MethodList {
+ p.setlineno(method)
+ var n *Node
+ if method.Name == nil {
+ n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
+ } else {
+ mname := p.name(method.Name)
+ sig := p.typeExpr(method.Type)
+ sig.Left = fakeRecv()
+ n = p.nodSym(method, ODCLFIELD, sig, mname)
+ ifacedcl(n)
+ }
+ l = append(l, n)
+ }
+
+ n := p.nod(expr, OTINTER, nil, nil)
+ n.List.Set(l)
+ return n
+}
+
+func (p *noder) packname(expr syntax.Expr) *types.Sym {
+ switch expr := expr.(type) {
+ case *syntax.Name:
+ name := p.name(expr)
+ if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
+ n.Name.Pack.Name.SetUsed(true)
+ }
+ return name
+ case *syntax.SelectorExpr:
+ name := p.name(expr.X.(*syntax.Name))
+ def := asNode(name.Def)
+ if def == nil {
+ yyerror("undefined: %v", name)
+ return name
+ }
+ var pkg *types.Pkg
+ if def.Op != OPACK {
+ yyerror("%v is not a package", name)
+ pkg = localpkg
+ } else {
+ def.Name.SetUsed(true)
+ pkg = def.Name.Pkg
+ }
+ return pkg.Lookup(expr.Sel.Value)
+ }
+ panic(fmt.Sprintf("unexpected packname: %#v", expr))
+}
+
+func (p *noder) embedded(typ syntax.Expr) *Node {
+ op, isStar := typ.(*syntax.Operation)
+ if isStar {
+ if op.Op != syntax.Mul || op.Y != nil {
+ panic("unexpected Operation")
+ }
+ typ = op.X
+ }
+
+ sym := p.packname(typ)
+ n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
+ n.SetEmbedded(true)
+
+ if isStar {
+ n.Left = p.nod(op, ODEREF, n.Left, nil)
+ }
+ return n
+}
+
+func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
+ return p.stmtsFall(stmts, false)
+}
+
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node {
+ var nodes []*Node
+ for i, stmt := range stmts {
+ s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
+ if s == nil {
+ } else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
+ nodes = append(nodes, s.List.Slice()...)
+ } else {
+ nodes = append(nodes, s)
+ }
+ }
+ return nodes
+}
+
+func (p *noder) stmt(stmt syntax.Stmt) *Node {
+ return p.stmtFall(stmt, false)
+}
+
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node {
+ p.setlineno(stmt)
+ switch stmt := stmt.(type) {
+ case *syntax.EmptyStmt:
+ return nil
+ case *syntax.LabeledStmt:
+ return p.labeledStmt(stmt, fallOK)
+ case *syntax.BlockStmt:
+ l := p.blockStmt(stmt)
+ if len(l) == 0 {
+ // TODO(mdempsky): Line number?
+ return nod(OEMPTY, nil, nil)
+ }
+ return liststmt(l)
+ case *syntax.ExprStmt:
+ return p.wrapname(stmt, p.expr(stmt.X))
+ case *syntax.SendStmt:
+ return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
+ case *syntax.DeclStmt:
+ return liststmt(p.decls(stmt.DeclList))
+ case *syntax.AssignStmt:
+ if stmt.Op != 0 && stmt.Op != syntax.Def {
+ n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
+ n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
+ n.SetSubOp(p.binOp(stmt.Op))
+ return n
+ }
+
+ n := p.nod(stmt, OAS, nil, nil) // assume common case
+
+ rhs := p.exprList(stmt.Rhs)
+ lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def)
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ // common case
+ n.Left = lhs[0]
+ n.Right = rhs[0]
+ } else {
+ n.Op = OAS2
+ n.List.Set(lhs)
+ n.Rlist.Set(rhs)
+ }
+ return n
+
+ case *syntax.BranchStmt:
+ var op Op
+ switch stmt.Tok {
+ case syntax.Break:
+ op = OBREAK
+ case syntax.Continue:
+ op = OCONTINUE
+ case syntax.Fallthrough:
+ if !fallOK {
+ yyerror("fallthrough statement out of place")
+ }
+ op = OFALL
+ case syntax.Goto:
+ op = OGOTO
+ default:
+ panic("unhandled BranchStmt")
+ }
+ n := p.nod(stmt, op, nil, nil)
+ if stmt.Label != nil {
+ n.Sym = p.name(stmt.Label)
+ }
+ return n
+ case *syntax.CallStmt:
+ var op Op
+ switch stmt.Tok {
+ case syntax.Defer:
+ op = ODEFER
+ case syntax.Go:
+ op = OGO
+ default:
+ panic("unhandled CallStmt")
+ }
+ return p.nod(stmt, op, p.expr(stmt.Call), nil)
+ case *syntax.ReturnStmt:
+ var results []*Node
+ if stmt.Results != nil {
+ results = p.exprList(stmt.Results)
+ }
+ n := p.nod(stmt, ORETURN, nil, nil)
+ n.List.Set(results)
+ if n.List.Len() == 0 && Curfn != nil {
+ for _, ln := range Curfn.Func.Dcl {
+ if ln.Class() == PPARAM {
+ continue
+ }
+ if ln.Class() != PPARAMOUT {
+ break
+ }
+ if asNode(ln.Sym.Def) != ln {
+ yyerror("%s is shadowed during return", ln.Sym.Name)
+ }
+ }
+ }
+ return n
+ case *syntax.IfStmt:
+ return p.ifStmt(stmt)
+ case *syntax.ForStmt:
+ return p.forStmt(stmt)
+ case *syntax.SwitchStmt:
+ return p.switchStmt(stmt)
+ case *syntax.SelectStmt:
+ return p.selectStmt(stmt)
+ }
+ panic("unhandled Stmt")
+}
+
+func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node {
+ if !colas {
+ return p.exprList(expr)
+ }
+
+ defn.SetColas(true)
+
+ var exprs []syntax.Expr
+ if list, ok := expr.(*syntax.ListExpr); ok {
+ exprs = list.ElemList
+ } else {
+ exprs = []syntax.Expr{expr}
+ }
+
+ res := make([]*Node, len(exprs))
+ seen := make(map[*types.Sym]bool, len(exprs))
+
+ newOrErr := false
+ for i, expr := range exprs {
+ p.setlineno(expr)
+ res[i] = nblank
+
+ name, ok := expr.(*syntax.Name)
+ if !ok {
+ p.yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+ newOrErr = true
+ continue
+ }
+
+ sym := p.name(name)
+ if sym.IsBlank() {
+ continue
+ }
+
+ if seen[sym] {
+ p.yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym)
+ newOrErr = true
+ continue
+ }
+ seen[sym] = true
+
+ if sym.Block == types.Block {
+ res[i] = oldname(sym)
+ continue
+ }
+
+ newOrErr = true
+ n := newname(sym)
+ declare(n, dclcontext)
+ n.Name.Defn = defn
+ defn.Ninit.Append(nod(ODCL, n, nil))
+ res[i] = n
+ }
+
+ if !newOrErr {
+ yyerrorl(defn.Pos, "no new variables on left side of :=")
+ }
+ return res
+}
+
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node {
+ p.openScope(stmt.Pos())
+ nodes := p.stmts(stmt.List)
+ p.closeScope(stmt.Rbrace)
+ return nodes
+}
+
+func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node {
+ p.openScope(stmt.Pos())
+ n := p.nod(stmt, OIF, nil, nil)
+ if stmt.Init != nil {
+ n.Ninit.Set1(p.stmt(stmt.Init))
+ }
+ if stmt.Cond != nil {
+ n.Left = p.expr(stmt.Cond)
+ }
+ n.Nbody.Set(p.blockStmt(stmt.Then))
+ if stmt.Else != nil {
+ e := p.stmt(stmt.Else)
+ if e.Op == OBLOCK && e.Ninit.Len() == 0 {
+ n.Rlist.Set(e.List.Slice())
+ } else {
+ n.Rlist.Set1(e)
+ }
+ }
+ p.closeAnotherScope()
+ return n
+}
+
+func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
+ p.openScope(stmt.Pos())
+ var n *Node
+ if r, ok := stmt.Init.(*syntax.RangeClause); ok {
+ if stmt.Cond != nil || stmt.Post != nil {
+ panic("unexpected RangeClause")
+ }
+
+ n = p.nod(r, ORANGE, nil, p.expr(r.X))
+ if r.Lhs != nil {
+ n.List.Set(p.assignList(r.Lhs, n, r.Def))
+ }
+ } else {
+ n = p.nod(stmt, OFOR, nil, nil)
+ if stmt.Init != nil {
+ n.Ninit.Set1(p.stmt(stmt.Init))
+ }
+ if stmt.Cond != nil {
+ n.Left = p.expr(stmt.Cond)
+ }
+ if stmt.Post != nil {
+ n.Right = p.stmt(stmt.Post)
+ }
+ }
+ n.Nbody.Set(p.blockStmt(stmt.Body))
+ p.closeAnotherScope()
+ return n
+}
+
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
+ p.openScope(stmt.Pos())
+ n := p.nod(stmt, OSWITCH, nil, nil)
+ if stmt.Init != nil {
+ n.Ninit.Set1(p.stmt(stmt.Init))
+ }
+ if stmt.Tag != nil {
+ n.Left = p.expr(stmt.Tag)
+ }
+
+ tswitch := n.Left
+ if tswitch != nil && tswitch.Op != OTYPESW {
+ tswitch = nil
+ }
+ n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
+
+ p.closeScope(stmt.Rbrace)
+ return n
+}
+
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace syntax.Pos) []*Node {
+ nodes := make([]*Node, 0, len(clauses))
+ for i, clause := range clauses {
+ p.setlineno(clause)
+ if i > 0 {
+ p.closeScope(clause.Pos())
+ }
+ p.openScope(clause.Pos())
+
+ n := p.nod(clause, OCASE, nil, nil)
+ if clause.Cases != nil {
+ n.List.Set(p.exprList(clause.Cases))
+ }
+ if tswitch != nil && tswitch.Left != nil {
+ nn := newname(tswitch.Left.Sym)
+ declare(nn, dclcontext)
+ n.Rlist.Set1(nn)
+ // keep track of the instances for reporting unused
+ nn.Name.Defn = tswitch
+ }
+
+ // Trim trailing empty statements. We omit them from
+ // the Node AST anyway, and it's easier to identify
+ // out-of-place fallthrough statements without them.
+ body := clause.Body
+ for len(body) > 0 {
+ if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
+ break
+ }
+ body = body[:len(body)-1]
+ }
+
+ n.Nbody.Set(p.stmtsFall(body, true))
+ if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL {
+ if tswitch != nil {
+ yyerror("cannot fallthrough in type switch")
+ }
+ if i+1 == len(clauses) {
+ yyerror("cannot fallthrough final case in switch")
+ }
+ }
+
+ nodes = append(nodes, n)
+ }
+ if len(clauses) > 0 {
+ p.closeScope(rbrace)
+ }
+ return nodes
+}
+
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
+ n := p.nod(stmt, OSELECT, nil, nil)
+ n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace))
+ return n
+}
+
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*Node {
+ nodes := make([]*Node, 0, len(clauses))
+ for i, clause := range clauses {
+ p.setlineno(clause)
+ if i > 0 {
+ p.closeScope(clause.Pos())
+ }
+ p.openScope(clause.Pos())
+
+ n := p.nod(clause, OCASE, nil, nil)
+ if clause.Comm != nil {
+ n.List.Set1(p.stmt(clause.Comm))
+ }
+ n.Nbody.Set(p.stmts(clause.Body))
+ nodes = append(nodes, n)
+ }
+ if len(clauses) > 0 {
+ p.closeScope(rbrace)
+ }
+ return nodes
+}
+
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node {
+ lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label))
+
+ var ls *Node
+ if label.Stmt != nil { // TODO(mdempsky): Should always be present.
+ ls = p.stmtFall(label.Stmt, fallOK)
+ }
+
+ lhs.Name.Defn = ls
+ l := []*Node{lhs}
+ if ls != nil {
+ if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
+ l = append(l, ls.List.Slice()...)
+ } else {
+ l = append(l, ls)
+ }
+ }
+ return liststmt(l)
+}
+
+var unOps = [...]Op{
+ syntax.Recv: ORECV,
+ syntax.Mul: ODEREF,
+ syntax.And: OADDR,
+
+ syntax.Not: ONOT,
+ syntax.Xor: OBITNOT,
+ syntax.Add: OPLUS,
+ syntax.Sub: ONEG,
+}
+
+func (p *noder) unOp(op syntax.Operator) Op {
+ if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
+ panic("invalid Operator")
+ }
+ return unOps[op]
+}
+
+var binOps = [...]Op{
+ syntax.OrOr: OOROR,
+ syntax.AndAnd: OANDAND,
+
+ syntax.Eql: OEQ,
+ syntax.Neq: ONE,
+ syntax.Lss: OLT,
+ syntax.Leq: OLE,
+ syntax.Gtr: OGT,
+ syntax.Geq: OGE,
+
+ syntax.Add: OADD,
+ syntax.Sub: OSUB,
+ syntax.Or: OOR,
+ syntax.Xor: OXOR,
+
+ syntax.Mul: OMUL,
+ syntax.Div: ODIV,
+ syntax.Rem: OMOD,
+ syntax.And: OAND,
+ syntax.AndNot: OANDNOT,
+ syntax.Shl: OLSH,
+ syntax.Shr: ORSH,
+}
+
+func (p *noder) binOp(op syntax.Operator) Op {
+ if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
+ panic("invalid Operator")
+ }
+ return binOps[op]
+}
+
+// checkLangCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
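+//
+// For illustration (a sketch): when the package is compiled with -lang=go1.12,
+// it reports 1_000 (underscores), 0b1010 (binary), 0o17 (0o/0O-style octal)
+// and 0x1p-2 (hexadecimal floating-point) literals, while plain decimal,
+// 0x-integer and 017-style octal literals remain accepted.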
+func checkLangCompat(lit *syntax.BasicLit) {
+ s := lit.Value
+ if len(s) <= 2 || langSupported(1, 13, localpkg) {
+ return
+ }
+ // len(s) > 2
+ if strings.Contains(s, "_") {
+ yyerrorv("go1.13", "underscores in numeric literals")
+ return
+ }
+ if s[0] != '0' {
+ return
+ }
+ base := s[1]
+ if base == 'b' || base == 'B' {
+ yyerrorv("go1.13", "binary literals")
+ return
+ }
+ if base == 'o' || base == 'O' {
+ yyerrorv("go1.13", "0o/0O-style octal literals")
+ return
+ }
+ if lit.Kind != syntax.IntLit && (base == 'x' || base == 'X') {
+ yyerrorv("go1.13", "hexadecimal floating-point literals")
+ }
+}
+
+func (p *noder) basicLit(lit *syntax.BasicLit) Val {
+ // We don't use the errors of the conversion routines to determine
+ // if a literal string is valid because the conversion routines may
+ // accept a wider syntax than the language permits. Rely on lit.Bad
+ // instead.
+ switch s := lit.Value; lit.Kind {
+ case syntax.IntLit:
+ checkLangCompat(lit)
+ x := new(Mpint)
+ if !lit.Bad {
+ x.SetString(s)
+ }
+ return Val{U: x}
+
+ case syntax.FloatLit:
+ checkLangCompat(lit)
+ x := newMpflt()
+ if !lit.Bad {
+ x.SetString(s)
+ }
+ return Val{U: x}
+
+ case syntax.ImagLit:
+ checkLangCompat(lit)
+ x := newMpcmplx()
+ if !lit.Bad {
+ x.Imag.SetString(strings.TrimSuffix(s, "i"))
+ }
+ return Val{U: x}
+
+ case syntax.RuneLit:
+ x := new(Mpint)
+ x.Rune = true
+ if !lit.Bad {
+ u, _ := strconv.Unquote(s)
+ var r rune
+ if len(u) == 1 {
+ r = rune(u[0])
+ } else {
+ r, _ = utf8.DecodeRuneInString(u)
+ }
+ x.SetInt64(int64(r))
+ }
+ return Val{U: x}
+
+ case syntax.StringLit:
+ var x string
+ if !lit.Bad {
+ if len(s) > 0 && s[0] == '`' {
+ // strip carriage returns from raw string
+ s = strings.Replace(s, "\r", "", -1)
+ }
+ x, _ = strconv.Unquote(s)
+ }
+ return Val{U: x}
+
+ default:
+ panic("unhandled BasicLit kind")
+ }
+}
+
+func (p *noder) name(name *syntax.Name) *types.Sym {
+ return lookup(name.Value)
+}
+
+func (p *noder) mkname(name *syntax.Name) *Node {
+ // TODO(mdempsky): Set line number?
+ return mkname(p.name(name))
+}
+
+func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch x.Op {
+ case OTYPE, OLITERAL:
+ if x.Sym == nil {
+ break
+ }
+ fallthrough
+ case ONAME, ONONAME, OPACK:
+ x = p.nod(n, OPAREN, x, nil)
+ x.SetImplicit(true)
+ }
+ return x
+}
+
+func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node {
+ return nodl(p.pos(orig), op, left, right)
+}
+
+func (p *noder) nodSym(orig syntax.Node, op Op, left *Node, sym *types.Sym) *Node {
+ n := nodSym(op, left, sym)
+ n.Pos = p.pos(orig)
+ return n
+}
+
+func (p *noder) pos(n syntax.Node) src.XPos {
+ // TODO(gri): orig.Pos() should always be known - fix package syntax
+ xpos := lineno
+ if pos := n.Pos(); pos.IsKnown() {
+ xpos = p.makeXPos(pos)
+ }
+ return xpos
+}
+
+func (p *noder) setlineno(n syntax.Node) {
+ if n != nil {
+ lineno = p.pos(n)
+ }
+}
+
+// error is called concurrently if files are parsed concurrently.
+func (p *noder) error(err error) {
+ p.err <- err.(syntax.Error)
+}
+
+// pragmas that are allowed in the std lib, but don't have
+// a syntax.Pragma value (see lex.go) associated with them.
+var allowedStdPragmas = map[string]bool{
+ "go:cgo_export_static": true,
+ "go:cgo_export_dynamic": true,
+ "go:cgo_import_static": true,
+ "go:cgo_import_dynamic": true,
+ "go:cgo_ldflag": true,
+ "go:cgo_dynamic_linker": true,
+ "go:embed": true,
+ "go:generate": true,
+}
+
+// *Pragma is the value stored in a syntax.Pragma during parsing.
+type Pragma struct {
+ Flag PragmaFlag // collected bits
+ Pos []PragmaPos // position of each individual flag
+ Embeds []PragmaEmbed
+}
+
+type PragmaPos struct {
+ Flag PragmaFlag
+ Pos syntax.Pos
+}
+
+type PragmaEmbed struct {
+ Pos syntax.Pos
+ Patterns []string
+}
+
+func (p *noder) checkUnused(pragma *Pragma) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ p.yyerrorpos(pos.Pos, "misplaced compiler directive")
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.yyerrorpos(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
+func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+ }
+ }
+}
+
+// pragma is called concurrently if files are parsed concurrently.
+func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
+ pragma, _ := old.(*Pragma)
+ if pragma == nil {
+ pragma = new(Pragma)
+ }
+
+ if text == "" {
+ // unused pragma; only called with old != nil.
+ p.checkUnusedDuringParse(pragma)
+ return nil
+ }
+
+ if strings.HasPrefix(text, "line ") {
+ // line directives are handled by syntax package
+ panic("unreachable")
+ }
+
+ if !blankLine {
+ // directive must be on line by itself
+ p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"})
+ return pragma
+ }
+
+ switch {
+ case strings.HasPrefix(text, "go:linkname "):
+ f := strings.Fields(text)
+ if !(2 <= len(f) && len(f) <= 3) {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"})
+ break
+ }
+ // The second argument is optional. If omitted, we use
+ // the default object symbol name for this and
+ // linkname only serves to mark this symbol as
+ // something that may be referenced via the object
+ // symbol name from another package.
+ var target string
+ if len(f) == 3 {
+ target = f[2]
+ }
+ p.linknames = append(p.linknames, linkname{pos, f[1], target})
+
+ case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+ args, err := parseGoEmbed(text[len("go:embed"):])
+ if err != nil {
+ p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+ }
+ if len(args) == 0 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+ break
+ }
+ pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
+
+ case strings.HasPrefix(text, "go:cgo_import_dynamic "):
+ // This is permitted for general use because Solaris
+ // code relies on it in golang.org/x/sys/unix and others.
+ fields := pragmaFields(text)
+ if len(fields) >= 4 {
+ lib := strings.Trim(fields[3], `"`)
+ if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)})
+ }
+ p.pragcgo(pos, text)
+ pragma.Flag |= pragmaFlag("go:cgo_import_dynamic")
+ break
+ }
+ fallthrough
+ case strings.HasPrefix(text, "go:cgo_"):
+ // For security, we disallow //go:cgo_* directives other
+ // than cgo_import_dynamic outside cgo-generated files.
+ // Exception: they are allowed in the standard library, for runtime and syscall.
+ if !isCgoGeneratedFile(pos) && !compiling_std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
+ }
+ p.pragcgo(pos, text)
+ fallthrough // because of //go:cgo_unsafe_args
+ default:
+ verb := text
+ if i := strings.Index(text, " "); i >= 0 {
+ verb = verb[:i]
+ }
+ flag := pragmaFlag(verb)
+ const runtimePragmas = Systemstack | Nowritebarrier | Nowritebarrierrec | Yeswritebarrierrec
+ if !compiling_runtime && flag&runtimePragmas != 0 {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
+ }
+ if flag == 0 && !allowedStdPragmas[verb] && compiling_std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
+ }
+ pragma.Flag |= flag
+ pragma.Pos = append(pragma.Pos, PragmaPos{flag, pos})
+ }
+
+ return pragma
+}
+
+// isCgoGeneratedFile reports whether pos is in a file
+// generated by cgo, which is to say a file with name
+// beginning with "_cgo_". Such files are allowed to
+// contain cgo directives, and for security reasons
+// (primarily misuse of linker flags), other files are not.
+// See golang.org/issue/23672.
+func isCgoGeneratedFile(pos syntax.Pos) bool {
+ return strings.HasPrefix(filepath.Base(filepath.Clean(fileh(pos.Base().Filename()))), "_cgo_")
+}
+
+// safeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// This is copied from SafeArg in cmd/go/internal/load/pkg.go.
+func safeArg(name string) bool {
+ if name == "" {
+ return false
+ }
+ c := name[0]
+ return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+func mkname(sym *types.Sym) *Node {
+ n := oldname(sym)
+ if n.Name != nil && n.Name.Pack != nil {
+ n.Name.Pack.Name.SetUsed(true)
+ }
+ return n
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
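+//
+// For illustration (a sketch): the text following //go:embed in
+//
+//	//go:embed a.txt "b c.txt" `d/*.png`
+//
+// parses to the patterns "a.txt", "b c.txt" and "d/*.png".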
+func parseGoEmbed(args string) ([]string, error) {
+ var list []string
+ for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+ var path string
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ args = args[i:]
+
+ case '`':
+ i := strings.Index(args[1:], "`")
+ if i < 0 {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ path = args[1 : 1+i]
+ args = args[1+i+1:]
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ args = args[i+1:]
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, path)
+ }
+ return list, nil
+}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
new file mode 100644
index 0000000..da1869e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -0,0 +1,639 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+)
+
+// architecture-independent object file output
+const ArhdrSize = 60
+
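+// For illustration (a sketch): formathdr fills the fixed 60-byte (ArhdrSize)
+// Unix archive header: a 16-byte name, 12-byte date, 6-byte uid, 6-byte gid,
+// 8-byte octal mode (0644 here) and 10-byte size, terminated by "`\n".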
+func formathdr(arhdr []byte, name string, size int64) {
+ copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}
+
+// These modes say which kind of object file to generate.
+// The default use of the toolchain is to set both bits,
+// generating a combined compiler+linker object, one that
+// serves to describe the package to both the compiler and the linker.
+// In fact the compiler and linker read nearly disjoint sections of
+// that file, though, so in a distributed build setting it can be more
+// efficient to split the output into two files, supplying the compiler
+// object only to future compilations and the linker object only to
+// future links.
+//
+// By default a combined object is written, but if -linkobj is specified
+// on the command line then the default -o output is a compiler object
+// and the -linkobj output is a linker object.
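+//
+// For illustration (a sketch of the intended usage, with the flags described
+// above): a distributed build might invoke
+//
+//	go tool compile -o p.a -linkobj p.o p.go
+//
+// feeding p.a to later compilations of importers and p.o to the final link.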
+const (
+ modeCompilerObj = 1 << iota
+ modeLinkerObj
+)
+
+func dumpobj() {
+ if linkobj == "" {
+ dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
+ return
+ }
+ dumpobj1(outfile, modeCompilerObj)
+ dumpobj1(linkobj, modeLinkerObj)
+}
+
+func dumpobj1(outfile string, mode int) {
+ bout, err := bio.Create(outfile)
+ if err != nil {
+ flusherrors()
+ fmt.Printf("can't create %s: %v\n", outfile, err)
+ errorexit()
+ }
+ defer bout.Close()
+ bout.WriteString("!<arch>\n")
+
+ if mode&modeCompilerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpCompilerObj(bout)
+ finishArchiveEntry(bout, start, "__.PKGDEF")
+ }
+ if mode&modeLinkerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpLinkerObj(bout)
+ finishArchiveEntry(bout, start, "_go_.o")
+ }
+}
+
+func printObjHeader(bout *bio.Writer) {
+ fmt.Fprintf(bout, "go object %s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
+ if buildid != "" {
+ fmt.Fprintf(bout, "build id %q\n", buildid)
+ }
+ if localpkg.Name == "main" {
+ fmt.Fprintf(bout, "main\n")
+ }
+ fmt.Fprintf(bout, "\n") // header ends with blank line
+}
+
+func startArchiveEntry(bout *bio.Writer) int64 {
+ var arhdr [ArhdrSize]byte
+ bout.Write(arhdr[:])
+ return bout.Offset()
+}
+
+func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
+ bout.Flush()
+ size := bout.Offset() - start
+ if size&1 != 0 {
+ bout.WriteByte(0)
+ }
+ bout.MustSeek(start-ArhdrSize, 0)
+
+ var arhdr [ArhdrSize]byte
+ formathdr(arhdr[:], name, size)
+ bout.Write(arhdr[:])
+ bout.Flush()
+ bout.MustSeek(start+size+(size&1), 0)
+}
+
+func dumpCompilerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+ dumpexport(bout)
+}
+
+func dumpdata() {
+ externs := len(externdcl)
+ xtops := len(xtop)
+
+ dumpglobls()
+ addptabs()
+ exportlistLen := len(exportlist)
+ addsignats(externdcl)
+ dumpsignats()
+ dumptabs()
+ ptabsLen := len(ptabs)
+ itabsLen := len(itabs)
+ dumpimportstrings()
+ dumpbasictypes()
+ dumpembeds()
+
+ // Calls to dumpsignats can generate functions,
+ // like method wrappers and hash and equality routines.
+ // Compile any generated functions, process any new resulting types, repeat.
+ // This can't loop forever, because there is no way to generate an infinite
+ // number of types in a finite amount of code.
+ // In the typical case, we loop 0 or 1 times.
+ // It was not until issue 24761 that we found any code that required a loop at all.
+ for {
+ for i := xtops; i < len(xtop); i++ {
+ n := xtop[i]
+ if n.Op == ODCLFUNC {
+ funccompile(n)
+ }
+ }
+ xtops = len(xtop)
+ compileFunctions()
+ dumpsignats()
+ if xtops == len(xtop) {
+ break
+ }
+ }
+
+ // Dump extra globals.
+ tmp := externdcl
+
+ if externdcl != nil {
+ externdcl = externdcl[externs:]
+ }
+ dumpglobls()
+ externdcl = tmp
+
+ if zerosize > 0 {
+ zero := mappkg.Lookup("zero")
+ ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+ zero.Linksym().Set(obj.AttrStatic, true)
+ }
+
+ addGCLocals()
+
+ if exportlistLen != len(exportlist) {
+ Fatalf("exportlist changed after compile functions loop")
+ }
+ if ptabsLen != len(ptabs) {
+ Fatalf("ptabs changed after compile functions loop")
+ }
+ if itabsLen != len(itabs) {
+ Fatalf("itabs changed after compile functions loop")
+ }
+}
+
+func dumpLinkerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+
+ if len(pragcgobuf) != 0 {
+ // write empty export section; must be before cgo section
+ fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+ fmt.Fprintf(bout, "\n$$ // cgo\n")
+ if err := json.NewEncoder(bout).Encode(pragcgobuf); err != nil {
+ Fatalf("serializing pragcgobuf: %v", err)
+ }
+ fmt.Fprintf(bout, "\n$$\n\n")
+ }
+
+ fmt.Fprintf(bout, "\n!\n")
+
+ obj.WriteObjFile(Ctxt, bout)
+}
+
+func addptabs() {
+ if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
+ return
+ }
+ for _, exportn := range exportlist {
+ s := exportn.Sym
+ n := asNode(s.Def)
+ if n == nil {
+ continue
+ }
+ if n.Op != ONAME {
+ continue
+ }
+ if !types.IsExported(s.Name) {
+ continue
+ }
+ if s.Pkg.Name != "main" {
+ continue
+ }
+ if n.Type.Etype == TFUNC && n.Class() == PFUNC {
+ // function
+ ptabs = append(ptabs, ptabEntry{s: s, t: asNode(s.Def).Type})
+ } else {
+ // variable
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(asNode(s.Def).Type)})
+ }
+ }
+}
+
+func dumpGlobal(n *Node) {
+ if n.Type == nil {
+ Fatalf("external %v nil type\n", n)
+ }
+ if n.Class() == PFUNC {
+ return
+ }
+ if n.Sym.Pkg != localpkg {
+ return
+ }
+ dowidth(n.Type)
+ ggloblnod(n)
+}
+
+func dumpGlobalConst(n *Node) {
+ // only export typed constants
+ t := n.Type
+ if t == nil {
+ return
+ }
+ if n.Sym.Pkg != localpkg {
+ return
+ }
+ // only export integer constants for now
+ switch t.Etype {
+ case TINT8:
+ case TINT16:
+ case TINT32:
+ case TINT64:
+ case TINT:
+ case TUINT8:
+ case TUINT16:
+ case TUINT32:
+ case TUINT64:
+ case TUINT:
+ case TUINTPTR:
+ // ok
+ case TIDEAL:
+ if !Isconst(n, CTINT) {
+ return
+ }
+ x := n.Val().U.(*Mpint)
+ if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 {
+ return
+ }
+ // Ideal integers we export as int (if they fit).
+ t = types.Types[TINT]
+ default:
+ return
+ }
+ Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64Val())
+}
+
+func dumpglobls() {
+ // add globals
+ for _, n := range externdcl {
+ switch n.Op {
+ case ONAME:
+ dumpGlobal(n)
+ case OLITERAL:
+ dumpGlobalConst(n)
+ }
+ }
+
+ sort.Slice(funcsyms, func(i, j int) bool {
+ return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
+ })
+ for _, s := range funcsyms {
+ sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
+ dsymptr(sf, 0, s.Linksym(), 0)
+ ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+
+ // Do not reprocess funcsyms on next dumpglobls call.
+ funcsyms = nil
+}
+
+// addGCLocals adds gcargs, gclocals, stack object, and open-coded defer symbols to Ctxt.Data.
+//
+// This is done during the sequential phase after compilation, since
+// global symbols can't be declared during parallel compilation.
+func addGCLocals() {
+ for _, s := range Ctxt.Text {
+ fn := s.Func()
+ if fn == nil {
+ continue
+ }
+ for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
+ if gcsym != nil && !gcsym.OnList() {
+ ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+ }
+ }
+ if x := fn.StackObjects; x != nil {
+ attr := int16(obj.RODATA)
+ ggloblsym(x, int32(len(x.P)), attr)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.OpenCodedDeferInfo; x != nil {
+ ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ }
+ }
+}
+
+func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
+ if off&(wid-1) != 0 {
+ Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ }
+ s.WriteInt(Ctxt, int64(off), wid, int64(v))
+ return off + wid
+}
+
+func duint8(s *obj.LSym, off int, v uint8) int {
+ return duintxx(s, off, uint64(v), 1)
+}
+
+func duint16(s *obj.LSym, off int, v uint16) int {
+ return duintxx(s, off, uint64(v), 2)
+}
+
+func duint32(s *obj.LSym, off int, v uint32) int {
+ return duintxx(s, off, uint64(v), 4)
+}
+
+func duintptr(s *obj.LSym, off int, v uint64) int {
+ return duintxx(s, off, v, Widthptr)
+}
+
+func dbvec(s *obj.LSym, off int, bv bvec) int {
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ for j := 0; int32(j) < bv.n; j += 8 {
+ word := bv.b[j/32]
+ off = duint8(s, off, uint8(word>>(uint(j)%32)))
+ }
+ return off
+}
+
+const (
+ stringSymPrefix = "go.string."
+ stringSymPattern = ".gostring.%d.%x"
+)
+
+// stringsym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
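+//
+// For illustration (a sketch of the naming scheme below): a short literal such
+// as "hi" is named go.string."hi", while a string longer than 100 bytes is
+// named go.string..gostring.<len>.<sha256>, keeping object-file names bounded.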
+func stringsym(pos src.XPos, s string) (data *obj.LSym) {
+ var symname string
+ if len(s) > 100 {
+ // Huge strings are hashed to avoid long names in object files.
+ // Indulge in some paranoia by writing the length of s, too,
+ // as protection against malicious collisions.
+ // Same pattern is known to fileStringSym below.
+ h := sha256.New()
+ io.WriteString(h, s)
+ symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
+ } else {
+ // Small strings get named directly by their contents.
+ symname = strconv.Quote(s)
+ }
+
+ symdata := Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ off := dstringdata(symdata, 0, s, pos, "string")
+ ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ symdata.Set(obj.AttrContentAddressable, true)
+ }
+
+ return symdata
+}
+
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
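+//
+// For illustration (a sketch of the two paths below): files of at most 1 KiB
+// are read into memory and emitted like ordinary string or slice data, while
+// larger files are only recorded by name and size (via NewFileInfo) for the
+// object writer to stream, after being hashed if a content hash is needed.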
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
+ if size <= 1*1024 {
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = stringsym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data)).Sym.Linksym()
+ }
+ if len(hash) > 0 {
+ sum := sha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > 2e9 {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large")
+ }
+
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := sha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ symname := fmt.Sprintf(stringSymPattern, size, sum)
+ symdata = Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "").Sym.Linksym()
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
+var slicedataGen int
+
+func slicedata(pos src.XPos, s string) *Node {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
+ sym := localpkg.Lookup(symname)
+ symnode := newname(sym)
+ sym.Def = asTypesNode(symnode)
+
+ lsym := sym.Linksym()
+ off := dstringdata(lsym, 0, s, pos, "slice")
+ ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+
+ return symnode
+}
+
+func slicebytes(nam *Node, s string) {
+ if nam.Op != ONAME {
+ Fatalf("slicebytes %v", nam)
+ }
+ slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
+}
+
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+ // Objects that are too large will cause the data section to overflow right away,
+ // causing a cryptic error message by the linker. Check for oversize objects here
+ // and provide a useful error message instead.
+ if int64(len(t)) > 2e9 {
+ yyerrorl(pos, "%v with length %v is too big", what, len(t))
+ return 0
+ }
+
+ s.WriteString(Ctxt, int64(off), len(t), t)
+ return off + len(t)
+}
+
+func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(Rnd(int64(off), int64(Widthptr)))
+ s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
+ off += Widthptr
+ return off
+}
+
+func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteOff(Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteWeakOff(Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
+// arr must be an ONAME. slicesym does not modify n.
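+//
+// For illustration (a sketch): it writes three pointer-sized words starting at
+// n's offset: the address of arr, then lencap as both the len and the cap
+// field of the slice header.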
+func slicesym(n, arr *Node, lencap int64) {
+ s := n.Sym.Linksym()
+ base := n.Xoffset
+ if arr.Op != ONAME {
+ Fatalf("slicesym non-name arr %v", arr)
+ }
+ s.WriteAddr(Ctxt, base, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
+ s.WriteInt(Ctxt, base+sliceLenOffset, Widthptr, lencap)
+ s.WriteInt(Ctxt, base+sliceCapOffset, Widthptr, lencap)
+}
+
+// addrsym writes the static address of a to n. a must be an ONAME.
+// Neither n nor a is modified.
+func addrsym(n, a *Node) {
+ if n.Op != ONAME {
+ Fatalf("addrsym n op %v", n.Op)
+ }
+ if n.Sym == nil {
+ Fatalf("addrsym nil n sym")
+ }
+ if a.Op != ONAME {
+ Fatalf("addrsym a op %v", a.Op)
+ }
+ s := n.Sym.Linksym()
+ s.WriteAddr(Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
+}
+
+// pfuncsym writes the static address of f to n. f must be a global function.
+// Neither n nor f is modified.
+func pfuncsym(n, f *Node) {
+ if n.Op != ONAME {
+ Fatalf("pfuncsym n op %v", n.Op)
+ }
+ if n.Sym == nil {
+ Fatalf("pfuncsym nil n sym")
+ }
+ if f.Class() != PFUNC {
+ Fatalf("pfuncsym class not PFUNC %d", f.Class())
+ }
+ s := n.Sym.Linksym()
+ s.WriteAddr(Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
+}
+
+// litsym writes the static literal c to n.
+// Neither n nor c is modified.
+func litsym(n, c *Node, wid int) {
+ if n.Op != ONAME {
+ Fatalf("litsym n op %v", n.Op)
+ }
+ if c.Op != OLITERAL {
+ Fatalf("litsym c op %v", c.Op)
+ }
+ if n.Sym == nil {
+ Fatalf("litsym nil n sym")
+ }
+ s := n.Sym.Linksym()
+ switch u := c.Val().U.(type) {
+ case bool:
+ i := int64(obj.Bool2int(u))
+ s.WriteInt(Ctxt, n.Xoffset, wid, i)
+
+ case *Mpint:
+ s.WriteInt(Ctxt, n.Xoffset, wid, u.Int64())
+
+ case *Mpflt:
+ f := u.Float64()
+ switch n.Type.Etype {
+ case TFLOAT32:
+ s.WriteFloat32(Ctxt, n.Xoffset, float32(f))
+ case TFLOAT64:
+ s.WriteFloat64(Ctxt, n.Xoffset, f)
+ }
+
+ case *Mpcplx:
+ r := u.Real.Float64()
+ i := u.Imag.Float64()
+ switch n.Type.Etype {
+ case TCOMPLEX64:
+ s.WriteFloat32(Ctxt, n.Xoffset, float32(r))
+ s.WriteFloat32(Ctxt, n.Xoffset+4, float32(i))
+ case TCOMPLEX128:
+ s.WriteFloat64(Ctxt, n.Xoffset, r)
+ s.WriteFloat64(Ctxt, n.Xoffset+8, i)
+ }
+
+ case string:
+ symdata := stringsym(n.Pos, u)
+ s.WriteAddr(Ctxt, n.Xoffset, Widthptr, symdata, 0)
+ s.WriteInt(Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(u)))
+
+ default:
+ Fatalf("litsym unhandled OLITERAL %v", c)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go
new file mode 100644
index 0000000..41d5883
--- /dev/null
+++ b/src/cmd/compile/internal/gc/op_string.go
@@ -0,0 +1,175 @@
+// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT.
+
+package gc
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[OXXX-0]
+ _ = x[ONAME-1]
+ _ = x[ONONAME-2]
+ _ = x[OTYPE-3]
+ _ = x[OPACK-4]
+ _ = x[OLITERAL-5]
+ _ = x[OADD-6]
+ _ = x[OSUB-7]
+ _ = x[OOR-8]
+ _ = x[OXOR-9]
+ _ = x[OADDSTR-10]
+ _ = x[OADDR-11]
+ _ = x[OANDAND-12]
+ _ = x[OAPPEND-13]
+ _ = x[OBYTES2STR-14]
+ _ = x[OBYTES2STRTMP-15]
+ _ = x[ORUNES2STR-16]
+ _ = x[OSTR2BYTES-17]
+ _ = x[OSTR2BYTESTMP-18]
+ _ = x[OSTR2RUNES-19]
+ _ = x[OAS-20]
+ _ = x[OAS2-21]
+ _ = x[OAS2DOTTYPE-22]
+ _ = x[OAS2FUNC-23]
+ _ = x[OAS2MAPR-24]
+ _ = x[OAS2RECV-25]
+ _ = x[OASOP-26]
+ _ = x[OCALL-27]
+ _ = x[OCALLFUNC-28]
+ _ = x[OCALLMETH-29]
+ _ = x[OCALLINTER-30]
+ _ = x[OCALLPART-31]
+ _ = x[OCAP-32]
+ _ = x[OCLOSE-33]
+ _ = x[OCLOSURE-34]
+ _ = x[OCOMPLIT-35]
+ _ = x[OMAPLIT-36]
+ _ = x[OSTRUCTLIT-37]
+ _ = x[OARRAYLIT-38]
+ _ = x[OSLICELIT-39]
+ _ = x[OPTRLIT-40]
+ _ = x[OCONV-41]
+ _ = x[OCONVIFACE-42]
+ _ = x[OCONVNOP-43]
+ _ = x[OCOPY-44]
+ _ = x[ODCL-45]
+ _ = x[ODCLFUNC-46]
+ _ = x[ODCLFIELD-47]
+ _ = x[ODCLCONST-48]
+ _ = x[ODCLTYPE-49]
+ _ = x[ODELETE-50]
+ _ = x[ODOT-51]
+ _ = x[ODOTPTR-52]
+ _ = x[ODOTMETH-53]
+ _ = x[ODOTINTER-54]
+ _ = x[OXDOT-55]
+ _ = x[ODOTTYPE-56]
+ _ = x[ODOTTYPE2-57]
+ _ = x[OEQ-58]
+ _ = x[ONE-59]
+ _ = x[OLT-60]
+ _ = x[OLE-61]
+ _ = x[OGE-62]
+ _ = x[OGT-63]
+ _ = x[ODEREF-64]
+ _ = x[OINDEX-65]
+ _ = x[OINDEXMAP-66]
+ _ = x[OKEY-67]
+ _ = x[OSTRUCTKEY-68]
+ _ = x[OLEN-69]
+ _ = x[OMAKE-70]
+ _ = x[OMAKECHAN-71]
+ _ = x[OMAKEMAP-72]
+ _ = x[OMAKESLICE-73]
+ _ = x[OMAKESLICECOPY-74]
+ _ = x[OMUL-75]
+ _ = x[ODIV-76]
+ _ = x[OMOD-77]
+ _ = x[OLSH-78]
+ _ = x[ORSH-79]
+ _ = x[OAND-80]
+ _ = x[OANDNOT-81]
+ _ = x[ONEW-82]
+ _ = x[ONEWOBJ-83]
+ _ = x[ONOT-84]
+ _ = x[OBITNOT-85]
+ _ = x[OPLUS-86]
+ _ = x[ONEG-87]
+ _ = x[OOROR-88]
+ _ = x[OPANIC-89]
+ _ = x[OPRINT-90]
+ _ = x[OPRINTN-91]
+ _ = x[OPAREN-92]
+ _ = x[OSEND-93]
+ _ = x[OSLICE-94]
+ _ = x[OSLICEARR-95]
+ _ = x[OSLICESTR-96]
+ _ = x[OSLICE3-97]
+ _ = x[OSLICE3ARR-98]
+ _ = x[OSLICEHEADER-99]
+ _ = x[ORECOVER-100]
+ _ = x[ORECV-101]
+ _ = x[ORUNESTR-102]
+ _ = x[OSELRECV-103]
+ _ = x[OSELRECV2-104]
+ _ = x[OIOTA-105]
+ _ = x[OREAL-106]
+ _ = x[OIMAG-107]
+ _ = x[OCOMPLEX-108]
+ _ = x[OALIGNOF-109]
+ _ = x[OOFFSETOF-110]
+ _ = x[OSIZEOF-111]
+ _ = x[OBLOCK-112]
+ _ = x[OBREAK-113]
+ _ = x[OCASE-114]
+ _ = x[OCONTINUE-115]
+ _ = x[ODEFER-116]
+ _ = x[OEMPTY-117]
+ _ = x[OFALL-118]
+ _ = x[OFOR-119]
+ _ = x[OFORUNTIL-120]
+ _ = x[OGOTO-121]
+ _ = x[OIF-122]
+ _ = x[OLABEL-123]
+ _ = x[OGO-124]
+ _ = x[ORANGE-125]
+ _ = x[ORETURN-126]
+ _ = x[OSELECT-127]
+ _ = x[OSWITCH-128]
+ _ = x[OTYPESW-129]
+ _ = x[OTCHAN-130]
+ _ = x[OTMAP-131]
+ _ = x[OTSTRUCT-132]
+ _ = x[OTINTER-133]
+ _ = x[OTFUNC-134]
+ _ = x[OTARRAY-135]
+ _ = x[ODDD-136]
+ _ = x[OINLCALL-137]
+ _ = x[OEFACE-138]
+ _ = x[OITAB-139]
+ _ = x[OIDATA-140]
+ _ = x[OSPTR-141]
+ _ = x[OCLOSUREVAR-142]
+ _ = x[OCFUNC-143]
+ _ = x[OCHECKNIL-144]
+ _ = x[OVARDEF-145]
+ _ = x[OVARKILL-146]
+ _ = x[OVARLIVE-147]
+ _ = x[ORESULT-148]
+ _ = x[OINLMARK-149]
+ _ = x[ORETJMP-150]
+ _ = x[OGETG-151]
+ _ = x[OEND-152]
+}
+
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNEWOBJNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKRETJMPGETGEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 70, 82, 91, 100, 112, 121, 123, 126, 136, 143, 150, 157, 161, 165, 173, 181, 190, 198, 201, 206, 213, 220, 226, 235, 243, 251, 257, 261, 270, 277, 281, 284, 291, 299, 307, 314, 320, 323, 329, 336, 344, 348, 355, 363, 365, 367, 369, 371, 373, 375, 380, 385, 393, 396, 405, 408, 412, 420, 427, 436, 449, 452, 455, 458, 461, 464, 467, 473, 476, 482, 485, 491, 495, 498, 502, 507, 512, 518, 523, 527, 532, 540, 548, 554, 563, 574, 581, 585, 592, 599, 607, 611, 615, 619, 626, 633, 641, 647, 652, 657, 661, 669, 674, 679, 683, 686, 694, 698, 700, 705, 707, 712, 718, 724, 730, 736, 741, 745, 752, 758, 763, 769, 772, 779, 784, 788, 793, 797, 807, 812, 820, 826, 833, 840, 846, 853, 859, 863, 866}
+
+func (i Op) String() string {
+ if i >= Op(len(_Op_index)-1) {
+ return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
new file mode 100644
index 0000000..30e1535
--- /dev/null
+++ b/src/cmd/compile/internal/gc/order.go
@@ -0,0 +1,1441 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// Order holds state during the ordering process.
+type Order struct {
+ out []*Node // list of generated statements
+ temp []*Node // stack of temporary variables
+ free map[string][]*Node // free list of unused temporaries, by type.LongString().
+}
+
+// Order rewrites fn.Nbody to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *Node) {
+ if Debug.W > 1 {
+ s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
+ dumplist(s, fn.Nbody)
+ }
+
+ orderBlock(&fn.Nbody, map[string][]*Node{})
+}
+
+// newTemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, newTemp emits code to zero the temporary.
+func (o *Order) newTemp(t *types.Type, clear bool) *Node {
+ var v *Node
+ // Note: LongString is close to the type equality we want,
+ // but not exactly. We still need to double-check with types.Identical.
+ key := t.LongString()
+ a := o.free[key]
+ for i, n := range a {
+ if types.Identical(t, n.Type) {
+ v = a[i]
+ a[i] = a[len(a)-1]
+ a = a[:len(a)-1]
+ o.free[key] = a
+ break
+ }
+ }
+ if v == nil {
+ v = temp(t)
+ }
+ if clear {
+ a := nod(OAS, v, nil)
+ a = typecheck(a, ctxStmt)
+ o.out = append(o.out, a)
+ }
+
+ o.temp = append(o.temp, v)
+ return v
+}
+
+// copyExpr behaves like newTemp but also emits
+// code to initialize the temporary to the value n.
+//
+// The clear argument is provided for use when the evaluation
+// of tmp = n turns into a function call that is passed a pointer
+// to the temporary as the output space. If the call blocks before
+// tmp has been written, the garbage collector will still treat the
+// temporary as live, so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func (o *Order) copyExpr(n *Node, t *types.Type, clear bool) *Node {
+ v := o.newTemp(t, clear)
+ a := nod(OAS, v, n)
+ a = typecheck(a, ctxStmt)
+ o.out = append(o.out, a)
+ return v
+}
+
+// cheapExpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, cheapExpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func (o *Order) cheapExpr(n *Node) *Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ case ONAME, OLITERAL:
+ return n
+ case OLEN, OCAP:
+ l := o.cheapExpr(n.Left)
+ if l == n.Left {
+ return n
+ }
+ a := n.sepcopy()
+ a.Left = l
+ return typecheck(a, ctxExpr)
+ }
+
+ return o.copyExpr(n, n.Type, false)
+}
+
+// safeExpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func (o *Order) safeExpr(n *Node) *Node {
+ switch n.Op {
+ case ONAME, OLITERAL:
+ return n
+
+ case ODOT, OLEN, OCAP:
+ l := o.safeExpr(n.Left)
+ if l == n.Left {
+ return n
+ }
+ a := n.sepcopy()
+ a.Left = l
+ return typecheck(a, ctxExpr)
+
+ case ODOTPTR, ODEREF:
+ l := o.cheapExpr(n.Left)
+ if l == n.Left {
+ return n
+ }
+ a := n.sepcopy()
+ a.Left = l
+ return typecheck(a, ctxExpr)
+
+ case OINDEX, OINDEXMAP:
+ var l *Node
+ if n.Left.Type.IsArray() {
+ l = o.safeExpr(n.Left)
+ } else {
+ l = o.cheapExpr(n.Left)
+ }
+ r := o.cheapExpr(n.Right)
+ if l == n.Left && r == n.Right {
+ return n
+ }
+ a := n.sepcopy()
+ a.Left = l
+ a.Right = r
+ return typecheck(a, ctxExpr)
+
+ default:
+ Fatalf("order.safeExpr %v", n.Op)
+ return nil // not reached
+ }
+}
+
+// isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
+func isaddrokay(n *Node) bool {
+ return islvalue(n) && (n.Op != ONAME || n.Class() == PEXTERN || n.IsAutoTmp())
+}
+
+// addrTemp ensures that n is okay to pass by address to runtime routines.
+// If the original argument n is not okay, addrTemp creates a tmp, emits
+// tmp = n, and then returns tmp.
+// The result of addrTemp MUST be assigned back to n, e.g.
+// n.Left = o.addrTemp(n.Left)
+func (o *Order) addrTemp(n *Node) *Node {
+ if consttype(n) != CTxxx {
+ // TODO: expand this to all static composite literal nodes?
+ n = defaultlit(n, nil)
+ dowidth(n.Type)
+ vstat := readonlystaticname(n.Type)
+ var s InitSchedule
+ s.staticassign(vstat, n)
+ if s.out != nil {
+ Fatalf("staticassign of const generated code: %+v", n)
+ }
+ vstat = typecheck(vstat, ctxExpr)
+ return vstat
+ }
+ if isaddrokay(n) {
+ return n
+ }
+ return o.copyExpr(n, n.Type, false)
+}
+
+// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
+// It should only be used for map runtime calls which have *_fast* versions.
+func (o *Order) mapKeyTemp(t *types.Type, n *Node) *Node {
+ // Most map calls need to take the address of the key.
+ // Exception: map*_fast* calls. See golang.org/issue/19015.
+ if mapfast(t) == mapslow {
+ return o.addrTemp(n)
+ }
+ return n
+}
+
+// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
+// in n to avoid string allocations for keys in map lookups.
+// Returns a bool that signals if a modification was made.
+//
+// For:
+// x = m[string(k)]
+// x = m[T1{... Tn{..., string(k), ...}}]
+// where k is []byte, T1 to Tn is a nesting of struct and array literals,
+// the allocation of backing bytes for the string can be avoided
+// by reusing the []byte backing array. These are special cases
+// for avoiding allocations when converting byte slices to strings.
+// It would be nice to handle these generally, but because
+// []byte keys are not allowed in maps, the use of string(k)
+// comes up in important cases in practice. See issue 3512.
+func mapKeyReplaceStrConv(n *Node) bool {
+ var replaced bool
+ switch n.Op {
+ case OBYTES2STR:
+ n.Op = OBYTES2STRTMP
+ replaced = true
+ case OSTRUCTLIT:
+ for _, elem := range n.List.Slice() {
+ if mapKeyReplaceStrConv(elem.Left) {
+ replaced = true
+ }
+ }
+ case OARRAYLIT:
+ for _, elem := range n.List.Slice() {
+ if elem.Op == OKEY {
+ elem = elem.Right
+ }
+ if mapKeyReplaceStrConv(elem) {
+ replaced = true
+ }
+ }
+ }
+ return replaced
+}
+
+type ordermarker int
+
+// markTemp returns the top of the temporary variable stack.
+func (o *Order) markTemp() ordermarker {
+ return ordermarker(len(o.temp))
+}
+
+// popTemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by markTemp.
+func (o *Order) popTemp(mark ordermarker) {
+ for _, n := range o.temp[mark:] {
+ key := n.Type.LongString()
+ o.free[key] = append(o.free[key], n)
+ }
+ o.temp = o.temp[:mark]
+}
+
+// cleanTempNoPop returns a list of VARKILL instructions, one
+// for each temporary above the mark on the temporary stack.
+// It does not pop the temporaries from the stack.
+func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
+ var out []*Node
+ for i := len(o.temp) - 1; i >= int(mark); i-- {
+ n := o.temp[i]
+ kill := nod(OVARKILL, n, nil)
+ kill = typecheck(kill, ctxStmt)
+ out = append(out, kill)
+ }
+ return out
+}
+
+// cleanTemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
+func (o *Order) cleanTemp(top ordermarker) {
+ o.out = append(o.out, o.cleanTempNoPop(top)...)
+ o.popTemp(top)
+}
+
+// stmtList orders each of the statements in the list.
+func (o *Order) stmtList(l Nodes) {
+ s := l.Slice()
+ for i := range s {
+ orderMakeSliceCopy(s[i:])
+ o.stmt(s[i])
+ }
+}
+
+// orderMakeSliceCopy matches the pattern:
+// m = OMAKESLICE([]T, x); OCOPY(m, s)
+// and rewrites it to:
+// m = OMAKESLICECOPY([]T, x, s); nil
+func orderMakeSliceCopy(s []*Node) {
+ if Debug.N != 0 || instrumenting {
+ return
+ }
+
+ if len(s) < 2 {
+ return
+ }
+
+ asn := s[0]
+ copyn := s[1]
+
+ if asn == nil || asn.Op != OAS {
+ return
+ }
+ if asn.Left.Op != ONAME {
+ return
+ }
+ if asn.Left.isBlank() {
+ return
+ }
+ maken := asn.Right
+ if maken == nil || maken.Op != OMAKESLICE {
+ return
+ }
+ if maken.Esc == EscNone {
+ return
+ }
+ if maken.Left == nil || maken.Right != nil {
+ return
+ }
+ if copyn.Op != OCOPY {
+ return
+ }
+ if copyn.Left.Op != ONAME {
+ return
+ }
+ if asn.Left.Sym != copyn.Left.Sym {
+ return
+ }
+ if copyn.Right.Op != ONAME {
+ return
+ }
+
+ if copyn.Left.Sym == copyn.Right.Sym {
+ return
+ }
+
+ maken.Op = OMAKESLICECOPY
+ maken.Right = copyn.Right
+ // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
+ maken.SetBounded(maken.Left.Op == OLEN && samesafeexpr(maken.Left.Left, copyn.Right))
+
+ maken = typecheck(maken, ctxExpr)
+
+ s[1] = nil // remove separate copy call
+
+ return
+}
+
+// edge inserts coverage instrumentation for libfuzzer.
+func (o *Order) edge() {
+ if Debug_libfuzzer == 0 {
+ return
+ }
+
+ // Create a new uint8 counter to be allocated in section
+ // __libfuzzer_extra_counters.
+ counter := staticname(types.Types[TUINT8])
+ counter.Name.SetLibfuzzerExtraCounter(true)
+
+ // counter += 1
+ incr := nod(OASOP, counter, nodintconst(1))
+ incr.SetSubOp(OADD)
+ incr = typecheck(incr, ctxStmt)
+
+ o.out = append(o.out, incr)
+}
+
+// orderBlock orders the block of statements in n into a new slice,
+// and then replaces the old slice in n with the new slice.
+// free is a map that can be used to obtain temporary variables by type.
+func orderBlock(n *Nodes, free map[string][]*Node) {
+ var order Order
+ order.free = free
+ mark := order.markTemp()
+ order.edge()
+ order.stmtList(*n)
+ order.cleanTemp(mark)
+ n.Set(order.out)
+}
+
+// exprInPlace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+// The result of exprInPlace MUST be assigned back to n, e.g.
+// n.Left = o.exprInPlace(n.Left)
+func (o *Order) exprInPlace(n *Node) *Node {
+ var order Order
+ order.free = o.free
+ n = order.expr(n, nil)
+ n = addinit(n, order.out)
+
+ // insert new temporaries from order
+ // at head of outer list.
+ o.temp = append(o.temp, order.temp...)
+ return n
+}
+
+// orderStmtInPlace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+// The result of orderStmtInPlace MUST be assigned back to n, e.g.
+// n.Left = orderStmtInPlace(n.Left)
+// free is a map that can be used to obtain temporary variables by type.
+func orderStmtInPlace(n *Node, free map[string][]*Node) *Node {
+ var order Order
+ order.free = free
+ mark := order.markTemp()
+ order.stmt(n)
+ order.cleanTemp(mark)
+ return liststmt(order.out)
+}
+
+// init moves n's init list to o.out.
+func (o *Order) init(n *Node) {
+ if n.mayBeShared() {
+ // For concurrency safety, don't mutate potentially shared nodes.
+ // First, ensure that no work is required here.
+ if n.Ninit.Len() > 0 {
+ Fatalf("order.init shared node with ninit")
+ }
+ return
+ }
+ o.stmtList(n.Ninit)
+ n.Ninit.Set(nil)
+}
+
+// call orders the call expression n.
+// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func (o *Order) call(n *Node) {
+ if n.Ninit.Len() > 0 {
+ // Caller should have already called o.init(n).
+ Fatalf("%v with unexpected ninit", n.Op)
+ }
+
+ // Builtin functions.
+ if n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER {
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ o.exprList(n.List)
+ return
+ }
+
+ fixVariadicCall(n)
+ n.Left = o.expr(n.Left, nil)
+ o.exprList(n.List)
+
+ if n.Op == OCALLINTER {
+ return
+ }
+ keepAlive := func(arg *Node) {
+ // If the argument is really a pointer being converted to uintptr,
+ // arrange for the pointer to be kept alive until the call returns,
+ // by copying it into a temp and marking that temp
+ // still alive when we pop the temp stack.
+ if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() {
+ x := o.copyExpr(arg.Left, arg.Left.Type, false)
+ arg.Left = x
+ x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
+ n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt))
+ }
+ }
+
+ // Check for "unsafe-uintptr" tag provided by escape analysis.
+ for i, param := range n.Left.Type.Params().FieldSlice() {
+ if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
+ if arg := n.List.Index(i); arg.Op == OSLICELIT {
+ for _, elt := range arg.List.Slice() {
+ keepAlive(elt)
+ }
+ } else {
+ keepAlive(arg)
+ }
+ }
+ }
+}
+
+// mapAssign appends n to o.out, introducing temporaries
+// to make sure that all map assignments have the form m[k] = x.
+// (Note: expr has already been called on n, so we know k is addressable.)
+//
+// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is
+// t1 = m
+// t2 = k
+// ...., t3, ... = ..., x, ...
+// t1[t2] = t3
+//
+// The temporaries t1, t2 are needed in case the ... being assigned
+// contain m or k. They are usually unnecessary, but in the unnecessary
+// cases they are also typically registerizable, so not much harm done.
+// And this only applies to the multiple-assignment form.
+// We could do a more precise analysis if needed, like in walk.go.
+func (o *Order) mapAssign(n *Node) {
+ switch n.Op {
+ default:
+ Fatalf("order.mapAssign %v", n.Op)
+
+ case OAS, OASOP:
+ if n.Left.Op == OINDEXMAP {
+ // Make sure we evaluate the RHS before starting the map insert.
+ // We need to make sure the RHS won't panic. See issue 22881.
+ if n.Right.Op == OAPPEND {
+ s := n.Right.List.Slice()[1:]
+ for i, n := range s {
+ s[i] = o.cheapExpr(n)
+ }
+ } else {
+ n.Right = o.cheapExpr(n.Right)
+ }
+ }
+ o.out = append(o.out, n)
+
+ case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
+ var post []*Node
+ for i, m := range n.List.Slice() {
+ switch {
+ case m.Op == OINDEXMAP:
+ if !m.Left.IsAutoTmp() {
+ m.Left = o.copyExpr(m.Left, m.Left.Type, false)
+ }
+ if !m.Right.IsAutoTmp() {
+ m.Right = o.copyExpr(m.Right, m.Right.Type, false)
+ }
+ fallthrough
+ case instrumenting && n.Op == OAS2FUNC && !m.isBlank():
+ t := o.newTemp(m.Type, false)
+ n.List.SetIndex(i, t)
+ a := nod(OAS, m, t)
+ a = typecheck(a, ctxStmt)
+ post = append(post, a)
+ }
+ }
+
+ o.out = append(o.out, n)
+ o.out = append(o.out, post...)
+ }
+}
+
+// stmt orders the statement n, appending to o.out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions where possible.
+func (o *Order) stmt(n *Node) {
+ if n == nil {
+ return
+ }
+
+ lno := setlineno(n)
+ o.init(n)
+
+ switch n.Op {
+ default:
+ Fatalf("order.stmt %v", n.Op)
+
+ case OVARKILL, OVARLIVE, OINLMARK:
+ o.out = append(o.out, n)
+
+ case OAS:
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, n.Left)
+ o.mapAssign(n)
+ o.cleanTemp(t)
+
+ case OASOP:
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+
+ if instrumenting || n.Left.Op == OINDEXMAP && (n.SubOp() == ODIV || n.SubOp() == OMOD) {
+ // Rewrite m[k] op= r into m[k] = m[k] op r so
+ // that we can ensure that if op panics
+ // because r is zero, the panic happens before
+ // the map assignment.
+
+ n.Left = o.safeExpr(n.Left)
+
+ l := treecopy(n.Left, src.NoXPos)
+ if l.Op == OINDEXMAP {
+ l.SetIndexMapLValue(false)
+ }
+ l = o.copyExpr(l, n.Left.Type, false)
+ n.Right = nod(n.SubOp(), l, n.Right)
+ n.Right = typecheck(n.Right, ctxExpr)
+ n.Right = o.expr(n.Right, nil)
+
+ n.Op = OAS
+ n.ResetAux()
+ }
+
+ o.mapAssign(n)
+ o.cleanTemp(t)
+
+ case OAS2:
+ t := o.markTemp()
+ o.exprList(n.List)
+ o.exprList(n.Rlist)
+ o.mapAssign(n)
+ o.cleanTemp(t)
+
+ // Special: avoid copy of func call n.Right
+ case OAS2FUNC:
+ t := o.markTemp()
+ o.exprList(n.List)
+ o.init(n.Right)
+ o.call(n.Right)
+ o.as2(n)
+ o.cleanTemp(t)
+
+ // Special: use temporary variables to hold result,
+ // so that runtime can take address of temporary.
+ // No temporary for blank assignment.
+ //
+ // OAS2MAPR: make sure key is addressable if needed,
+ // and make sure OINDEXMAP is not copied out.
+ case OAS2DOTTYPE, OAS2RECV, OAS2MAPR:
+ t := o.markTemp()
+ o.exprList(n.List)
+
+ switch r := n.Right; r.Op {
+ case ODOTTYPE2, ORECV:
+ r.Left = o.expr(r.Left, nil)
+ case OINDEXMAP:
+ r.Left = o.expr(r.Left, nil)
+ r.Right = o.expr(r.Right, nil)
+ // See similar conversion for OINDEXMAP below.
+ _ = mapKeyReplaceStrConv(r.Right)
+ r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
+ default:
+ Fatalf("order.stmt: %v", r.Op)
+ }
+
+ o.okAs2(n)
+ o.cleanTemp(t)
+
+ // Special: does not save n onto out.
+ case OBLOCK, OEMPTY:
+ o.stmtList(n.List)
+
+ // Special: n->left is not an expression; save as is.
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ ODCLCONST,
+ ODCLTYPE,
+ OFALL,
+ OGOTO,
+ OLABEL,
+ ORETJMP:
+ o.out = append(o.out, n)
+
+ // Special: handle call arguments.
+ case OCALLFUNC, OCALLINTER, OCALLMETH:
+ t := o.markTemp()
+ o.call(n)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case OCLOSE,
+ OCOPY,
+ OPRINT,
+ OPRINTN,
+ ORECOVER,
+ ORECV:
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ o.exprList(n.List)
+ o.exprList(n.Rlist)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Special: order arguments to inner call but not call itself.
+ case ODEFER, OGO:
+ t := o.markTemp()
+ o.init(n.Left)
+ o.call(n.Left)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ODELETE:
+ t := o.markTemp()
+ n.List.SetFirst(o.expr(n.List.First(), nil))
+ n.List.SetSecond(o.expr(n.List.Second(), nil))
+ n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second()))
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Clean temporaries from condition evaluation at
+ // beginning of loop body and after for statement.
+ case OFOR:
+ t := o.markTemp()
+ n.Left = o.exprInPlace(n.Left)
+ n.Nbody.Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(&n.Nbody, o.free)
+ n.Right = orderStmtInPlace(n.Right, o.free)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Clean temporaries from condition at
+ // beginning of both branches.
+ case OIF:
+ t := o.markTemp()
+ n.Left = o.exprInPlace(n.Left)
+ n.Nbody.Prepend(o.cleanTempNoPop(t)...)
+ n.Rlist.Prepend(o.cleanTempNoPop(t)...)
+ o.popTemp(t)
+ orderBlock(&n.Nbody, o.free)
+ orderBlock(&n.Rlist, o.free)
+ o.out = append(o.out, n)
+
+ // Special: argument will be converted to interface using convT2E
+ // so make sure it is an addressable temporary.
+ case OPANIC:
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ if !n.Left.Type.IsInterface() {
+ n.Left = o.addrTemp(n.Left)
+ }
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ORANGE:
+ // n.Right is the expression being ranged over.
+ // order it, and then make a copy if we need one.
+ // We almost always do, to ensure that we don't
+ // see any value changes made during the loop.
+ // Usually the copy is cheap (e.g., array pointer,
+ // chan, slice, string are all tiny).
+ // The exception is ranging over an array value
+ // (not a slice, not a pointer to array),
+ // which must make a copy to avoid seeing updates made during
+ // the range body. Ranging over an array value is uncommon though.
+
+ // Mark []byte(str) range expression to reuse string backing storage.
+ // It is safe because the storage cannot be mutated.
+ if n.Right.Op == OSTR2BYTES {
+ n.Right.Op = OSTR2BYTESTMP
+ }
+
+ t := o.markTemp()
+ n.Right = o.expr(n.Right, nil)
+
+ orderBody := true
+ switch n.Type.Etype {
+ default:
+ Fatalf("order.stmt range %v", n.Type)
+
+ case TARRAY, TSLICE:
+ if n.List.Len() < 2 || n.List.Second().isBlank() {
+ // for i := range x will only use x once, to compute len(x).
+ // No need to copy it.
+ break
+ }
+ fallthrough
+
+ case TCHAN, TSTRING:
+ // chan, string, slice, array ranges use value multiple times.
+ // make copy.
+ r := n.Right
+
+ if r.Type.IsString() && r.Type != types.Types[TSTRING] {
+ r = nod(OCONV, r, nil)
+ r.Type = types.Types[TSTRING]
+ r = typecheck(r, ctxExpr)
+ }
+
+ n.Right = o.copyExpr(r, r.Type, false)
+
+ case TMAP:
+ if isMapClear(n) {
+ // Preserve the body of the map clear pattern so it can
+ // be detected during walk. The loop body will not be used
+ // when optimizing away the range loop to a runtime call.
+ orderBody = false
+ break
+ }
+
+ // copy the map value in case it is a map literal.
+ // TODO(rsc): Make tmp = literal expressions reuse tmp.
+ // For maps tmp is just one word so it hardly matters.
+ r := n.Right
+ n.Right = o.copyExpr(r, r.Type, false)
+
+ // prealloc[n] is the temp for the iterator.
+ // hiter contains pointers and needs to be zeroed.
+ prealloc[n] = o.newTemp(hiter(n.Type), true)
+ }
+ o.exprListInPlace(n.List)
+ if orderBody {
+ orderBlock(&n.Nbody, o.free)
+ }
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ORETURN:
+ o.exprList(n.List)
+ o.out = append(o.out, n)
+
+ // Special: clean case temporaries in each block entry.
+ // Select must enter one of its blocks, so there is no
+ // need for a cleaning at the end.
+ // Doubly special: evaluation order for select is stricter
+ // than ordinary expressions. Even something like p.c
+ // has to be hoisted into a temporary, so that it cannot be
+ // reordered after the channel evaluation for a different
+ // case (if p were nil, then the timing of the fault would
+ // give this away).
+ case OSELECT:
+ t := o.markTemp()
+
+ for _, n2 := range n.List.Slice() {
+ if n2.Op != OCASE {
+ Fatalf("order select case %v", n2.Op)
+ }
+ r := n2.Left
+ setlineno(n2)
+
+ // Append any new body prologue to ninit.
+ // The next loop will insert ninit into nbody.
+ if n2.Ninit.Len() != 0 {
+ Fatalf("order select ninit")
+ }
+ if r == nil {
+ continue
+ }
+ switch r.Op {
+ default:
+ Dump("select case", r)
+ Fatalf("unknown op in select %v", r.Op)
+
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ case OSELRECV, OSELRECV2:
+ if r.Colas() {
+ i := 0
+ if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left {
+ i++
+ }
+ if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
+ i++
+ }
+ if i >= r.Ninit.Len() {
+ r.Ninit.Set(nil)
+ }
+ }
+
+ if r.Ninit.Len() != 0 {
+ dumplist("ninit", r.Ninit)
+ Fatalf("ninit on select recv")
+ }
+
+ // case x = <-c
+ // case x, ok = <-c
+ // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
+ // r->left == N means 'case <-c'.
+ // c is always evaluated; x and ok are only evaluated when assigned.
+ r.Right.Left = o.expr(r.Right.Left, nil)
+
+ if !r.Right.Left.IsAutoTmp() {
+ r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false)
+ }
+
+ // Introduce temporary for receive and move actual copy into case body.
+ // This avoids problems with the target being addressed, as usual.
+ // NOTE: If we wanted to be clever, we could arrange for just one
+ // temporary per distinct type, sharing the temp among all receives
+ // with that temp. Similarly one ok bool could be shared among all
+ // the x,ok receives. Not worth doing until there's a clear need.
+ if r.Left != nil && r.Left.isBlank() {
+ r.Left = nil
+ }
+ if r.Left != nil {
+ // use channel element type for temporary to avoid conversions,
+ // such as in case interfacevalue = <-intchan.
+ // the conversion happens in the OAS instead.
+ tmp1 := r.Left
+
+ if r.Colas() {
+ tmp2 := nod(ODCL, tmp1, nil)
+ tmp2 = typecheck(tmp2, ctxStmt)
+ n2.Ninit.Append(tmp2)
+ }
+
+ r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
+ tmp2 := nod(OAS, tmp1, r.Left)
+ tmp2 = typecheck(tmp2, ctxStmt)
+ n2.Ninit.Append(tmp2)
+ }
+
+ if r.List.Len() != 0 && r.List.First().isBlank() {
+ r.List.Set(nil)
+ }
+ if r.List.Len() != 0 {
+ tmp1 := r.List.First()
+ if r.Colas() {
+ tmp2 := nod(ODCL, tmp1, nil)
+ tmp2 = typecheck(tmp2, ctxStmt)
+ n2.Ninit.Append(tmp2)
+ }
+
+ r.List.Set1(o.newTemp(types.Types[TBOOL], false))
+ tmp2 := okas(tmp1, r.List.First())
+ tmp2 = typecheck(tmp2, ctxStmt)
+ n2.Ninit.Append(tmp2)
+ }
+ orderBlock(&n2.Ninit, o.free)
+
+ case OSEND:
+ if r.Ninit.Len() != 0 {
+ dumplist("ninit", r.Ninit)
+ Fatalf("ninit on select send")
+ }
+
+ // case c <- x
+ // r->left is c, r->right is x, both are always evaluated.
+ r.Left = o.expr(r.Left, nil)
+
+ if !r.Left.IsAutoTmp() {
+ r.Left = o.copyExpr(r.Left, r.Left.Type, false)
+ }
+ r.Right = o.expr(r.Right, nil)
+ if !r.Right.IsAutoTmp() {
+ r.Right = o.copyExpr(r.Right, r.Right.Type, false)
+ }
+ }
+ }
+ // Now that we have accumulated all the temporaries, clean them.
+ // Also insert any ninit queued during the previous loop.
+ // (The temporary cleaning must follow that ninit work.)
+ for _, n3 := range n.List.Slice() {
+ orderBlock(&n3.Nbody, o.free)
+ n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
+
+ // TODO(mdempsky): Is this actually necessary?
+ // walkselect appears to walk Ninit.
+ n3.Nbody.Prepend(n3.Ninit.Slice()...)
+ n3.Ninit.Set(nil)
+ }
+
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Special: value being sent is passed as a pointer; make it addressable.
+ case OSEND:
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ if instrumenting {
+ // Force copying to the stack so that (chan T)(nil) <- x
+ // is still instrumented as a read of x.
+ n.Right = o.copyExpr(n.Right, n.Right.Type, false)
+ } else {
+ n.Right = o.addrTemp(n.Right)
+ }
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // TODO(rsc): Clean temporaries more aggressively.
+ // Note that because walkswitch will rewrite some of the
+ // switch into a binary search, this is not as easy as it looks.
+ // (If we ran that code here we could invoke order.stmt on
+ // the if-else chain instead.)
+ // For now just clean all the temporaries at the end.
+ // In practice that's fine.
+ case OSWITCH:
+ if Debug_libfuzzer != 0 && !hasDefaultCase(n) {
+ // Add empty "default:" case for instrumentation.
+ n.List.Append(nod(OCASE, nil, nil))
+ }
+
+ t := o.markTemp()
+ n.Left = o.expr(n.Left, nil)
+ for _, ncas := range n.List.Slice() {
+ if ncas.Op != OCASE {
+ Fatalf("order switch case %v", ncas.Op)
+ }
+ o.exprListInPlace(ncas.List)
+ orderBlock(&ncas.Nbody, o.free)
+ }
+
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+ }
+
+ lineno = lno
+}
+
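+// hasDefaultCase reports whether the switch statement n has a default case.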
+func hasDefaultCase(n *Node) bool {
+ for _, ncas := range n.List.Slice() {
+ if ncas.Op != OCASE {
+ Fatalf("expected case, found %v", ncas.Op)
+ }
+ if ncas.List.Len() == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// exprList orders the expression list l into o.
+func (o *Order) exprList(l Nodes) {
+ s := l.Slice()
+ for i := range s {
+ s[i] = o.expr(s[i], nil)
+ }
+}
+
+// exprListInPlace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func (o *Order) exprListInPlace(l Nodes) {
+ s := l.Slice()
+ for i := range s {
+ s[i] = o.exprInPlace(s[i])
+ }
+}
+
+// prealloc[x] records the allocation to use for x.
+var prealloc = map[*Node]*Node{}
+
+// expr orders a single expression, appending side
+// effects to o.out as needed.
+// If this is part of an assignment lhs = *np, lhs is given.
+// Otherwise lhs == nil. (When lhs != nil it may be possible
+// to avoid copying the result of the expression to a temporary.)
+// The result of expr MUST be assigned back to n, e.g.
+// n.Left = o.expr(n.Left, lhs)
+func (o *Order) expr(n, lhs *Node) *Node {
+ if n == nil {
+ return n
+ }
+
+ lno := setlineno(n)
+ o.init(n)
+
+ switch n.Op {
+ default:
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ o.exprList(n.List)
+ o.exprList(n.Rlist)
+
+ // Addition of strings turns into a function call.
+ // Allocate a temporary to hold the strings.
+ // Fewer than 5 strings use direct runtime helpers.
+ case OADDSTR:
+ o.exprList(n.List)
+
+ if n.List.Len() > 5 {
+ t := types.NewArray(types.Types[TSTRING], int64(n.List.Len()))
+ prealloc[n] = o.newTemp(t, false)
+ }
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String concatenation does not
+ // memorize the strings for later use, so it is safe.
+ // However, we can do it only if there is at least one non-empty string literal.
+ // Otherwise if all other arguments are empty strings,
+ // concatstrings will return the reference to the temp string
+ // to the caller.
+ hasbyte := false
+
+ haslit := false
+ for _, n1 := range n.List.Slice() {
+ hasbyte = hasbyte || n1.Op == OBYTES2STR
+ haslit = haslit || n1.Op == OLITERAL && len(n1.StringVal()) != 0
+ }
+
+ if haslit && hasbyte {
+ for _, n2 := range n.List.Slice() {
+ if n2.Op == OBYTES2STR {
+ n2.Op = OBYTES2STRTMP
+ }
+ }
+ }
+
+ case OINDEXMAP:
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ needCopy := false
+
+ if !n.IndexMapLValue() {
+ // Enforce that any []byte slices we are not copying
+ // can not be changed before the map index by forcing
+ // the map index to happen immediately following the
+ // conversions. See copyExpr a few lines below.
+ needCopy = mapKeyReplaceStrConv(n.Right)
+
+ if instrumenting {
+ // Race detector needs the copy so it can
+ // call treecopy on the result.
+ needCopy = true
+ }
+ }
+
+ // key must be addressable
+ n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
+ if needCopy {
+ n = o.copyExpr(n, n.Type, false)
+ }
+
+ // concrete type (not interface) argument might need an addressable
+ // temporary to pass to the runtime conversion routine.
+ case OCONVIFACE:
+ n.Left = o.expr(n.Left, nil)
+ if n.Left.Type.IsInterface() {
+ break
+ }
+ if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) {
+ // Need a temp if we need to pass the address to the conversion function.
+ // We also process static composite literal node here, making a named static global
+ // whose address we can put directly in an interface (see OCONVIFACE case in walk).
+ n.Left = o.addrTemp(n.Left)
+ }
+
+ case OCONVNOP:
+ if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) {
+ // When reordering unsafe.Pointer(f()) into a separate
+ // statement, the conversion and function call must stay
+ // together. See golang.org/issue/15329.
+ o.init(n.Left)
+ o.call(n.Left)
+ if lhs == nil || lhs.Op != ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type, false)
+ }
+ } else {
+ n.Left = o.expr(n.Left, nil)
+ }
+
+ case OANDAND, OOROR:
+ // ... = LHS && RHS
+ //
+ // var r bool
+ // r = LHS
+ // if r { // or !r, for OROR
+ // r = RHS
+ // }
+ // ... = r
+
+ r := o.newTemp(n.Type, false)
+
+ // Evaluate left-hand side.
+ lhs := o.expr(n.Left, nil)
+ o.out = append(o.out, typecheck(nod(OAS, r, lhs), ctxStmt))
+
+ // Evaluate right-hand side, save generated code.
+ saveout := o.out
+ o.out = nil
+ t := o.markTemp()
+ o.edge()
+ rhs := o.expr(n.Right, nil)
+ o.out = append(o.out, typecheck(nod(OAS, r, rhs), ctxStmt))
+ o.cleanTemp(t)
+ gen := o.out
+ o.out = saveout
+
+ // If left-hand side doesn't cause a short-circuit, issue right-hand side.
+ nif := nod(OIF, r, nil)
+ if n.Op == OANDAND {
+ nif.Nbody.Set(gen)
+ } else {
+ nif.Rlist.Set(gen)
+ }
+ o.out = append(o.out, nif)
+ n = r
+
+ case OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OCAP,
+ OCOMPLEX,
+ OCOPY,
+ OIMAG,
+ OLEN,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ OMAKESLICECOPY,
+ ONEW,
+ OREAL,
+ ORECOVER,
+ OSTR2BYTES,
+ OSTR2BYTESTMP,
+ OSTR2RUNES:
+
+ if isRuneCount(n) {
+ // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
+ n.Left.Left = o.expr(n.Left.Left, nil)
+ } else {
+ o.call(n)
+ }
+
+ if lhs == nil || lhs.Op != ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type, false)
+ }
+
+ case OAPPEND:
+ // Check for append(x, make([]T, y)...) .
+ if isAppendOfMake(n) {
+ n.List.SetFirst(o.expr(n.List.First(), nil)) // order x
+ n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y
+ } else {
+ o.exprList(n.List)
+ }
+
+ if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
+ n = o.copyExpr(n, n.Type, false)
+ }
+
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ n.Left = o.expr(n.Left, nil)
+ low, high, max := n.SliceBounds()
+ low = o.expr(low, nil)
+ low = o.cheapExpr(low)
+ high = o.expr(high, nil)
+ high = o.cheapExpr(high)
+ max = o.expr(max, nil)
+ max = o.cheapExpr(max)
+ n.SetSliceBounds(low, high, max)
+ if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
+ n = o.copyExpr(n, n.Type, false)
+ }
+
+ case OCLOSURE:
+ if n.Transient() && n.Func.Closure.Func.Cvars.Len() > 0 {
+ prealloc[n] = o.newTemp(closureType(n), false)
+ }
+
+ case OSLICELIT, OCALLPART:
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+ o.exprList(n.List)
+ o.exprList(n.Rlist)
+ if n.Transient() {
+ var t *types.Type
+ switch n.Op {
+ case OSLICELIT:
+ t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ case OCALLPART:
+ t = partialCallType(n)
+ }
+ prealloc[n] = o.newTemp(t, false)
+ }
+
+ case ODOTTYPE, ODOTTYPE2:
+ n.Left = o.expr(n.Left, nil)
+ if !isdirectiface(n.Type) || instrumenting {
+ n = o.copyExpr(n, n.Type, true)
+ }
+
+ case ORECV:
+ n.Left = o.expr(n.Left, nil)
+ n = o.copyExpr(n, n.Type, true)
+
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ n.Left = o.expr(n.Left, nil)
+ n.Right = o.expr(n.Right, nil)
+
+ t := n.Left.Type
+ switch {
+ case t.IsString():
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String comparison does not
+ // memorize the strings for later use, so it is safe.
+ if n.Left.Op == OBYTES2STR {
+ n.Left.Op = OBYTES2STRTMP
+ }
+ if n.Right.Op == OBYTES2STR {
+ n.Right.Op = OBYTES2STRTMP
+ }
+
+ case t.IsStruct() || t.IsArray():
+ // for complex comparisons, we need both args to be
+ // addressable so we can pass them to the runtime.
+ n.Left = o.addrTemp(n.Left)
+ n.Right = o.addrTemp(n.Right)
+ }
+ case OMAPLIT:
+ // Order map by converting:
+ // map[int]int{
+ // a(): b(),
+ // c(): d(),
+ // e(): f(),
+ // }
+ // to
+ // m := map[int]int{}
+ // m[a()] = b()
+ // m[c()] = d()
+ // m[e()] = f()
+ // Then order the result.
+ // Without this special case, order would otherwise compute all
+ // the keys and values before storing any of them to the map.
+ // See issue 26552.
+ entries := n.List.Slice()
+ statics := entries[:0]
+ var dynamics []*Node
+ for _, r := range entries {
+ if r.Op != OKEY {
+ Fatalf("OMAPLIT entry not OKEY: %v\n", r)
+ }
+
+ if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ // Recursively ordering some static entries can change them to dynamic;
+ // e.g., OCONVIFACE nodes. See #31777.
+ r = o.expr(r, nil)
+ if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ statics = append(statics, r)
+ }
+ n.List.Set(statics)
+
+ if len(dynamics) == 0 {
+ break
+ }
+
+ // Emit the creation of the map (with all its static entries).
+ m := o.newTemp(n.Type, false)
+ as := nod(OAS, m, n)
+ typecheck(as, ctxStmt)
+ o.stmt(as)
+ n = m
+
+ // Emit eval+insert of dynamic entries, one at a time.
+ for _, r := range dynamics {
+ as := nod(OAS, nod(OINDEX, n, r.Left), r.Right)
+ typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
+ o.stmt(as)
+ }
+ }
+
+ lineno = lno
+ return n
+}
+
+// okas creates and returns an assignment of val to ok,
+// including an explicit conversion if necessary.
+func okas(ok, val *Node) *Node {
+ if !ok.isBlank() {
+ val = conv(val, ok.Type)
+ }
+ return nod(OAS, ok, val)
+}
+
+// as2 orders OAS2XXXX nodes. It creates temporaries to ensure left-to-right assignment.
+// The caller should order the right-hand side of the assignment before calling order.as2.
+// It rewrites,
+// a, b, a = ...
+// as
+// tmp1, tmp2, tmp3 = ...
+// a, b, a = tmp1, tmp2, tmp3
+// This is necessary to ensure left to right assignment order.
+func (o *Order) as2(n *Node) {
+ tmplist := []*Node{}
+ left := []*Node{}
+ for ni, l := range n.List.Slice() {
+ if !l.isBlank() {
+ tmp := o.newTemp(l.Type, l.Type.HasPointers())
+ n.List.SetIndex(ni, tmp)
+ tmplist = append(tmplist, tmp)
+ left = append(left, l)
+ }
+ }
+
+ o.out = append(o.out, n)
+
+ as := nod(OAS2, nil, nil)
+ as.List.Set(left)
+ as.Rlist.Set(tmplist)
+ as = typecheck(as, ctxStmt)
+ o.stmt(as)
+}
+
+// okAs2 orders OAS2XXX with ok.
+// Just like as2, this also adds temporaries to ensure left-to-right assignment.
+func (o *Order) okAs2(n *Node) {
+ var tmp1, tmp2 *Node
+ if !n.List.First().isBlank() {
+ typ := n.Right.Type
+ tmp1 = o.newTemp(typ, typ.HasPointers())
+ }
+
+ if !n.List.Second().isBlank() {
+ tmp2 = o.newTemp(types.Types[TBOOL], false)
+ }
+
+ o.out = append(o.out, n)
+
+ if tmp1 != nil {
+ r := nod(OAS, n.List.First(), tmp1)
+ r = typecheck(r, ctxStmt)
+ o.mapAssign(r)
+ n.List.SetFirst(tmp1)
+ }
+ if tmp2 != nil {
+ r := okas(n.List.Second(), tmp2)
+ r = typecheck(r, ctxStmt)
+ o.mapAssign(r)
+ n.List.SetSecond(tmp2)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
new file mode 100644
index 0000000..353f4b0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -0,0 +1,798 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// "Portable" code generation.
+
+var (
+ nBackendWorkers int // number of concurrent backend workers, set by a compiler flag
+ compilequeue []*Node // functions waiting to be compiled
+)
+
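+// emitptrargsmap emits the <fn>.args_stackmap symbol: a bitmap recording
+// which words of fn's receiver, parameter, and result area contain pointers.
+// It is emitted for functions that have no body (see funccompile).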
+func emitptrargsmap(fn *Node) {
+ if fn.funcname() == "_" || fn.Func.Nname.Sym.Linkname != "" {
+ return
+ }
+ lsym := Ctxt.Lookup(fn.Func.lsym.Name + ".args_stackmap")
+
+ nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
+ bv := bvalloc(int32(nptr) * 2)
+ nbitmap := 1
+ if fn.Type.NumResults() > 0 {
+ nbitmap = 2
+ }
+ off := duint32(lsym, 0, uint32(nbitmap))
+ off = duint32(lsym, off, uint32(bv.n))
+
+ if fn.IsMethod() {
+ onebitwalktype1(fn.Type.Recvs(), 0, bv)
+ }
+ if fn.Type.NumParams() > 0 {
+ onebitwalktype1(fn.Type.Params(), 0, bv)
+ }
+ off = dbvec(lsym, off, bv)
+
+ if fn.Type.NumResults() > 0 {
+ onebitwalktype1(fn.Type.Results(), 0, bv)
+ off = dbvec(lsym, off, bv)
+ }
+
+ ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
+}
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+//
+// Sort the list of stack variables. Autos after anything else,
+// within autos, unused after used, within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out at decreasing addresses on the stack,
+// "pointers first, zeroed things first, decreasing size" really means
+// that, in memory, things with pointers needing zeroing sit at the top
+// of the stack and increase in size toward the top.
+// Non-autos sort on offset.
+func cmpstackvarlt(a, b *Node) bool {
+ if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
+ return b.Class() == PAUTO
+ }
+
+ if a.Class() != PAUTO {
+ return a.Xoffset < b.Xoffset
+ }
+
+ if a.Name.Used() != b.Name.Used() {
+ return a.Name.Used()
+ }
+
+ ap := a.Type.HasPointers()
+ bp := b.Type.HasPointers()
+ if ap != bp {
+ return ap
+ }
+
+ ap = a.Name.Needzero()
+ bp = b.Name.Needzero()
+ if ap != bp {
+ return ap
+ }
+
+ if a.Type.Width != b.Type.Width {
+ return a.Type.Width > b.Type.Width
+ }
+
+ return a.Sym.Name < b.Sym.Name
+}
+
+// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
+type byStackVar []*Node
+
+func (s byStackVar) Len() int { return len(s) }
+func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
+func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s *ssafn) AllocFrame(f *ssa.Func) {
+ s.stksize = 0
+ s.stkptrsize = 0
+ fn := s.curfn.Func
+
+ // Mark the PAUTO's unused.
+ for _, ln := range fn.Dcl {
+ if ln.Class() == PAUTO {
+ ln.Name.SetUsed(false)
+ }
+ }
+
+ for _, l := range f.RegAlloc {
+ if ls, ok := l.(ssa.LocalSlot); ok {
+ ls.N.(*Node).Name.SetUsed(true)
+ }
+ }
+
+ scratchUsed := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if n, ok := v.Aux.(*Node); ok {
+ switch n.Class() {
+ case PPARAM, PPARAMOUT:
+ // Don't modify nodfp; it is a global.
+ if n != nodfp {
+ n.Name.SetUsed(true)
+ }
+ case PAUTO:
+ n.Name.SetUsed(true)
+ }
+ }
+ if !scratchUsed {
+ scratchUsed = v.Op.UsesScratch()
+ }
+
+ }
+ }
+
+ if f.Config.NeedsFpScratch && scratchUsed {
+ s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
+ }
+
+ sort.Sort(byStackVar(fn.Dcl))
+
+ // Reassign stack offsets of the locals that are used.
+ lastHasPtr := false
+ for i, n := range fn.Dcl {
+ if n.Op != ONAME || n.Class() != PAUTO {
+ continue
+ }
+ if !n.Name.Used() {
+ fn.Dcl = fn.Dcl[:i]
+ break
+ }
+
+ dowidth(n.Type)
+ w := n.Type.Width
+ if w >= thearch.MAXWIDTH || w < 0 {
+ Fatalf("bad width")
+ }
+ if w == 0 && lastHasPtr {
+ // Pad between a pointer-containing object and a zero-sized object.
+ // This prevents a pointer to the zero-sized object from being interpreted
+ // as a pointer to the pointer-containing object (and causing it
+ // to be scanned when it shouldn't be). See issue 24993.
+ w = 1
+ }
+ s.stksize += w
+ s.stksize = Rnd(s.stksize, int64(n.Type.Align))
+ if n.Type.HasPointers() {
+ s.stkptrsize = s.stksize
+ lastHasPtr = true
+ } else {
+ lastHasPtr = false
+ }
+ if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
+ s.stksize = Rnd(s.stksize, int64(Widthptr))
+ }
+ n.Xoffset = -s.stksize
+ }
+
+ s.stksize = Rnd(s.stksize, int64(Widthreg))
+ s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
+}
+
+func funccompile(fn *Node) {
+ if Curfn != nil {
+ Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+ }
+
+ if fn.Type == nil {
+ if nerrors == 0 {
+ Fatalf("funccompile missing type")
+ }
+ return
+ }
+
+ // assign parameter offsets
+ dowidth(fn.Type)
+
+ if fn.Nbody.Len() == 0 {
+ // Initialize ABI wrappers if necessary.
+ fn.Func.initLSym(false)
+ emitptrargsmap(fn)
+ return
+ }
+
+ dclcontext = PAUTO
+ Curfn = fn
+
+ compile(fn)
+
+ Curfn = nil
+ dclcontext = PEXTERN
+}
+
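+// compile prepares fn for code generation: it orders and walks the body,
+// applies instrumentation if requested, and then either compiles the
+// function immediately or queues it on compilequeue for the backend workers.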
+func compile(fn *Node) {
+ saveerrors()
+
+ order(fn)
+ if nerrors != 0 {
+ return
+ }
+
+ // Set up the function's LSym early to avoid data races with the assemblers.
+ // Do this before walk, as walk needs the LSym to set attributes/relocations
+ // (e.g. in markTypeUsedInInterface).
+ fn.Func.initLSym(true)
+
+ walk(fn)
+ if nerrors != 0 {
+ return
+ }
+ if instrumenting {
+ instrument(fn)
+ }
+
+ // From this point, there should be no uses of Curfn. Enforce that.
+ Curfn = nil
+
+ if fn.funcname() == "_" {
+ // We don't need to generate code for this function, just report errors in its body.
+ // At this point we've generated any errors needed.
+ // (Beyond here we generate only non-spec errors, like "stack frame too large".)
+ // See issue 29870.
+ return
+ }
+
+ // Make sure type syms are declared for all types that might
+ // be types of stack objects. We need to do this here
+ // because symbols must be allocated before the parallel
+ // phase of the compiler.
+ for _, n := range fn.Func.Dcl {
+ switch n.Class() {
+ case PPARAM, PPARAMOUT, PAUTO:
+ if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ dtypesym(n.Type)
+ // Also make sure we allocate a linker symbol
+ // for the stack object data, for the same reason.
+ if fn.Func.lsym.Func().StackObjects == nil {
+ fn.Func.lsym.Func().StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
+ }
+ }
+ }
+ }
+
+ if compilenow(fn) {
+ compileSSA(fn, 0)
+ } else {
+ compilequeue = append(compilequeue, fn)
+ }
+}
+
+// compilenow reports whether to compile immediately.
+// If functions are not compiled immediately,
+// they are enqueued in compilequeue,
+// which is drained by compileFunctions.
+func compilenow(fn *Node) bool {
+ // Issue 38068: if this function is a method AND an inline
+ // candidate AND was not inlined (yet), put it onto the compile
+ // queue instead of compiling it immediately. This is in case we
+ // wind up inlining it into a method wrapper that is generated by
+ // compiling a function later on in the xtop list.
+ if fn.IsMethod() && isInlinableButNotInlined(fn) {
+ return false
+ }
+ return nBackendWorkers == 1 && Debug_compilelater == 0
+}
+
+// isInlinableButNotInlined returns true if 'fn' was marked as an
+// inline candidate but then never inlined (presumably because we
+// found no call sites).
+func isInlinableButNotInlined(fn *Node) bool {
+ if fn.Func.Nname.Func.Inl == nil {
+ return false
+ }
+ if fn.Sym == nil {
+ return true
+ }
+ return !fn.Sym.Linksym().WasInlined()
+}
+
+const maxStackSize = 1 << 30
+
+// compileSSA builds an SSA backend function,
+// uses it to generate a plist,
+// and flushes that plist to machine code.
+// worker indicates which of the backend workers is doing the processing.
+func compileSSA(fn *Node, worker int) {
+ f := buildssa(fn, worker)
+ // Note: check arg size to fix issue 25507.
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
+ largeStackFramesMu.Lock()
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
+ largeStackFramesMu.Unlock()
+ return
+ }
+ pp := newProgs(fn, worker)
+ defer pp.Free()
+ genssa(f, pp)
+ // Check frame size again.
+ // The check above included only the space needed for local variables.
+ // After genssa, the space needed includes local variables and the callee arg region.
+ // We must do this check prior to calling pp.Flush.
+ // If there are any oversized stack frames,
+ // the assembler may emit inscrutable complaints about invalid instructions.
+ if pp.Text.To.Offset >= maxStackSize {
+ largeStackFramesMu.Lock()
+ locals := f.Frontend().(*ssafn).stksize
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
+ largeStackFramesMu.Unlock()
+ return
+ }
+
+ pp.Flush() // assemble, fill in boilerplate, etc.
+ // fieldtrack must be called after pp.Flush. See issue 20014.
+ fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
+}
+
+func init() {
+ if race.Enabled {
+ rand.Seed(time.Now().UnixNano())
+ }
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans out nBackendWorkers to do the work
+// and waits for them to complete.
+func compileFunctions() {
+ if len(compilequeue) != 0 {
+ sizeCalculationDisabled = true // not safe to calculate sizes concurrently
+ if race.Enabled {
+ // Randomize compilation order to try to shake out races.
+ tmp := make([]*Node, len(compilequeue))
+ perm := rand.Perm(len(compilequeue))
+ for i, v := range perm {
+ tmp[v] = compilequeue[i]
+ }
+ copy(compilequeue, tmp)
+ } else {
+ // Compile the longest functions first,
+ // since they're most likely to be the slowest.
+ // This helps avoid stragglers.
+ sort.Slice(compilequeue, func(i, j int) bool {
+ return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
+ })
+ }
+ var wg sync.WaitGroup
+ Ctxt.InParallel = true
+ c := make(chan *Node, nBackendWorkers)
+ for i := 0; i < nBackendWorkers; i++ {
+ wg.Add(1)
+ go func(worker int) {
+ for fn := range c {
+ compileSSA(fn, worker)
+ }
+ wg.Done()
+ }(i)
+ }
+ for _, fn := range compilequeue {
+ c <- fn
+ }
+ close(c)
+ compilequeue = nil
+ wg.Wait()
+ Ctxt.InParallel = false
+ sizeCalculationDisabled = false
+ }
+}
+
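+// debuginfo returns the DWARF scopes and inlined-call information for
+// curfn (a *Node), which must correspond to the function symbol fnsym.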
+func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
+ fn := curfn.(*Node)
+ if fn.Func.Nname != nil {
+ if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
+ Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
+ }
+
+ var apdecls []*Node
+ // Populate decls for fn.
+ for _, n := range fn.Func.Dcl {
+ if n.Op != ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class() {
+ case PAUTO:
+ if !n.Name.Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func().Text != nil {
+ Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
+ }
+ case PPARAM, PPARAMOUT:
+ default:
+ continue
+ }
+ apdecls = append(apdecls, n)
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
+ }
+
+ decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
+
+ // For each type referenced by the function's auto vars but not
+ // already referenced by a dwarf var, attach a dummy relocation to
+ // the function symbol to ensure that the type is included in DWARF
+ // processing during linking.
+ typesyms := []*obj.LSym{}
+ for t := range fnsym.Func().Autot {
+ typesyms = append(typesyms, t)
+ }
+ sort.Sort(obj.BySymName(typesyms))
+ for _, sym := range typesyms {
+ r := obj.Addrel(infosym)
+ r.Sym = sym
+ r.Type = objabi.R_USETYPE
+ }
+ fnsym.Func().Autot = nil
+
+ var varScopes []ScopeID
+ for _, decl := range decls {
+ pos := declPos(decl)
+ varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+ }
+
+ scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
+ var inlcalls dwarf.InlCalls
+ if genDwarfInline > 0 {
+ inlcalls = assembleInlines(fnsym, dwarfVars)
+ }
+ return scopes, inlcalls
+}
+
+func declPos(decl *Node) src.XPos {
+ if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+ // It's not clear which position is correct for captured variables here:
+ // * decl.Pos is the wrong position for captured variables, in the inner
+ // function, but it is the right position in the outer function.
+ // * decl.Name.Defn is nil for captured variables that were arguments
+ // to the outer function; however, the decl.Pos for those seems to be
+ // correct.
+ // * decl.Name.Defn is the "wrong" thing for variables declared in the
+ // header of a type switch: it's their position in the header, rather
+ // than the position of the case statement. In principle this is the
+ // right thing, but here we prefer the latter because it makes each
+ // instance of the header variable local to the lexical block of its
+ // case statement.
+ // This code is probably wrong for type switch variables that are also
+ // captured.
+ return decl.Name.Defn.Pos
+ }
+ return decl.Pos
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+ var vars []*dwarf.Var
+ var decls []*Node
+ selected := make(map[*Node]bool)
+ for _, n := range apDecls {
+ if n.IsAutoTmp() {
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected[n] = true
+ }
+ return decls, vars, selected
+}
+
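+// createSimpleVar builds a DWARF variable entry for n, describing it as
+// permanently resident at a fixed offset on the stack.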
+func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
+ var abbrev int
+ offs := n.Xoffset
+
+ switch n.Class() {
+ case PAUTO:
+ abbrev = dwarf.DW_ABRV_AUTO
+ if Ctxt.FixedFrameSize() == 0 {
+ offs -= int64(Widthptr)
+ }
+ if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+ // There is a word space for FP on ARM64 even if the frame pointer is disabled
+ offs -= int64(Widthptr)
+ }
+
+ case PPARAM, PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM
+ offs += Ctxt.FixedFrameSize()
+ default:
+ Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
+ }
+
+ typename := dwarf.InfoPrefix + typesymname(n.Type)
+ delete(fnsym.Func().Autot, ngotype(n).Linksym())
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.Name.InlFormal() || n.Name.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ if n.Name.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM
+ }
+ }
+ }
+ declpos := Ctxt.InnermostPos(declPos(n))
+ return &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: n.Class() == PPARAMOUT,
+ IsInlFormal: n.Name.InlFormal(),
+ Abbrev: abbrev,
+ StackOffset: int32(offs),
+ Type: Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ }
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
+ debugInfo := fn.DebugInfo
+
+ // Produce a DWARF variable entry for each user variable.
+ var decls []*Node
+ var vars []*dwarf.Var
+ ssaVars := make(map[*Node]bool)
+
+ for varID, dvar := range debugInfo.Vars {
+ n := dvar.(*Node)
+ ssaVars[n] = true
+ for _, slot := range debugInfo.VarSlots[varID] {
+ ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
+ }
+
+ if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+ decls = append(decls, n)
+ vars = append(vars, dvar)
+ }
+ }
+
+ return decls, vars, ssaVars
+}
+
+// createDwarfVars processes fn, returning a list of DWARF variables and the
+// Nodes they represent.
+func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dwarf.Var) {
+ // Collect a raw list of DWARF vars.
+ var vars []*dwarf.Var
+ var decls []*Node
+ var selected map[*Node]bool
+ if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
+ decls, vars, selected = createComplexVars(fnsym, fn)
+ } else {
+ decls, vars, selected = createSimpleVars(fnsym, apDecls)
+ }
+
+ dcl := apDecls
+ if fnsym.WasInlined() {
+ dcl = preInliningDcls(fnsym)
+ }
+
+ // If optimization is enabled, the list above will typically be
+ // missing some of the original pre-optimization variables in the
+ // function (they may have been promoted to registers, folded into
+ // constants, dead-coded away, etc). Input arguments not eligible
+ // for SSA optimization are also missing. Here we add back in entries
+ // for selected missing vars. Note that the recipe below creates a
+ // conservative location. The idea here is that we want to
+ // communicate to the user that "yes, there is a variable named X
+ // in this function, but no, I don't have enough information to
+ // reliably report its contents."
+ // For non-SSA-able arguments, however, the correct information
+ // is known -- they have a single home on the stack.
+ for _, n := range dcl {
+ if _, found := selected[n]; found {
+ continue
+ }
+ c := n.Sym.Name[0]
+ if c == '.' || n.Type.IsUntyped() {
+ continue
+ }
+ if n.Class() == PPARAM && !canSSAType(n.Type) {
+ // SSA-able args get location lists, and may move in and
+ // out of registers, so those are handled elsewhere.
+ // Autos and named output params seem to get handled
+ // with VARDEF, which creates location lists.
+ // Args not of SSA-able type are treated here; they
+ // are homed on the stack in a single place for the
+ // entire call.
+ vars = append(vars, createSimpleVar(fnsym, n))
+ decls = append(decls, n)
+ continue
+ }
+ typename := dwarf.InfoPrefix + typesymname(n.Type)
+ decls = append(decls, n)
+ abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+ isReturnValue := (n.Class() == PPARAMOUT)
+ if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ } else if n.Class() == PAUTOHEAP {
+ // If dcl in question has been promoted to heap, do a bit
+ // of extra work to recover original class (auto or param);
+ // see issue 30908. This ensures that we get the proper
+ // signature in the abstract function DIE, but leaves a
+ // misleading location for the param (we want pointer-to-heap
+ // and not stack).
+ // TODO(thanm): generate a better location expression
+ stackcopy := n.Name.Param.Stackcopy
+ if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ isReturnValue = (stackcopy.Class() == PPARAMOUT)
+ }
+ }
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.Name.InlFormal() || n.Name.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ if n.Name.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := Ctxt.InnermostPos(n.Pos)
+ vars = append(vars, &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: isReturnValue,
+ Abbrev: abbrev,
+ StackOffset: int32(n.Xoffset),
+ Type: Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ })
+ // Record the Go type to ensure that it gets emitted by the linker.
+ fnsym.Func().RecordAutoType(ngotype(n).Linksym())
+ }
+
+ return decls, vars
+}
+
+// Given a function that was inlined at some point during the
+// compilation, return a sorted list of nodes corresponding to the
+// autos/locals in that function prior to inlining. If this is a
+// function that is not local to the package being compiled, then the
+// names of the variables may have been "versioned" to avoid conflicts
+// with local vars; disregard this versioning when sorting.
+func preInliningDcls(fnsym *obj.LSym) []*Node {
+ fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
+ var rdcl []*Node
+ for _, n := range fn.Func.Inl.Dcl {
+ c := n.Sym.Name[0]
+ // Avoid reporting "_" parameters, since if there are more than
+ // one, it can result in a collision later on, as in #23179.
+ if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
+ continue
+ }
+ rdcl = append(rdcl, n)
+ }
+ return rdcl
+}
+
+// stackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
+func stackOffset(slot ssa.LocalSlot) int32 {
+ n := slot.N.(*Node)
+ var base int64
+ switch n.Class() {
+ case PAUTO:
+ if Ctxt.FixedFrameSize() == 0 {
+ base -= int64(Widthptr)
+ }
+ if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
+ // There is a word space for FP on ARM64 even if the frame pointer is disabled
+ base -= int64(Widthptr)
+ }
+ case PPARAM, PPARAMOUT:
+ base += Ctxt.FixedFrameSize()
+ }
+ return int32(base + n.Xoffset + slot.Off)
+}
+
+// createComplexVar builds a single DWARF variable entry and location list.
+func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
+ debug := fn.DebugInfo
+ n := debug.Vars[varID].(*Node)
+
+ var abbrev int
+ switch n.Class() {
+ case PAUTO:
+ abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+ case PPARAM, PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ default:
+ return nil
+ }
+
+ gotype := ngotype(n).Linksym()
+ delete(fnsym.Func().Autot, gotype)
+ typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
+ inlIndex := 0
+ if genDwarfInline > 1 {
+ if n.Name.InlFormal() || n.Name.InlLocal() {
+ inlIndex = posInlIndex(n.Pos) + 1
+ if n.Name.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := Ctxt.InnermostPos(n.Pos)
+ dvar := &dwarf.Var{
+ Name: n.Sym.Name,
+ IsReturnValue: n.Class() == PPARAMOUT,
+ IsInlFormal: n.Name.InlFormal(),
+ Abbrev: abbrev,
+ Type: Ctxt.Lookup(typename),
+ // The stack offset is used as a sorting key, so for decomposed
+ // variables just give it the first one. It's not used otherwise.
+ // This won't work well if the first slot hasn't been assigned a stack
+ // location, but it's not obvious how to do better.
+ StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.Col(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ }
+ list := debug.LocationLists[varID]
+ if len(list) != 0 {
+ dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+ debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ }
+ }
+ return dvar
+}
+
+// fieldtrack adds R_USEFIELD relocations to fnsym to record any
+// struct fields that it used.
+func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
+ if fnsym == nil {
+ return
+ }
+ if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
+ return
+ }
+
+ trackSyms := make([]*types.Sym, 0, len(tracked))
+ for sym := range tracked {
+ trackSyms = append(trackSyms, sym)
+ }
+ sort.Sort(symByName(trackSyms))
+ for _, sym := range trackSyms {
+ r := obj.Addrel(fnsym)
+ r.Sym = sym.Linksym()
+ r.Type = objabi.R_USEFIELD
+ }
+}
+
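+// symByName implements sort.Interface, ordering symbols by name.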
+type symByName []*types.Sym
+
+func (a symByName) Len() int { return len(a) }
+func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
+func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go
new file mode 100644
index 0000000..b1db298
--- /dev/null
+++ b/src/cmd/compile/internal/gc/pgen_test.go
@@ -0,0 +1,196 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func typeWithoutPointers() *types.Type {
+ t := types.New(TSTRUCT)
+ f := &types.Field{Type: types.New(TINT)}
+ t.SetFields([]*types.Field{f})
+ return t
+}
+
+func typeWithPointers() *types.Type {
+ t := types.New(TSTRUCT)
+ f := &types.Field{Type: types.NewPtr(types.New(TINT))}
+ t.SetFields([]*types.Field{f})
+ return t
+}
+
+func markUsed(n *Node) *Node {
+ n.Name.SetUsed(true)
+ return n
+}
+
+func markNeedZero(n *Node) *Node {
+ n.Name.SetNeedzero(true)
+ return n
+}
+
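+// nodeWithClass returns a copy of n with its storage class set to c and a
+// fresh Name attached.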
+func nodeWithClass(n Node, c Class) *Node {
+ n.SetClass(c)
+ n.Name = new(Name)
+ return &n
+}
+
+// Test all code paths for cmpstackvarlt.
+func TestCmpstackvar(t *testing.T) {
+ testdata := []struct {
+ a, b *Node
+ lt bool
+ }{
+ {
+ nodeWithClass(Node{}, PAUTO),
+ nodeWithClass(Node{}, PFUNC),
+ false,
+ },
+ {
+ nodeWithClass(Node{}, PFUNC),
+ nodeWithClass(Node{}, PAUTO),
+ true,
+ },
+ {
+ nodeWithClass(Node{Xoffset: 0}, PFUNC),
+ nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ true,
+ },
+ {
+ nodeWithClass(Node{Xoffset: 20}, PFUNC),
+ nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ false,
+ },
+ {
+ nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ nodeWithClass(Node{Xoffset: 10}, PFUNC),
+ false,
+ },
+ {
+ nodeWithClass(Node{Xoffset: 10}, PPARAM),
+ nodeWithClass(Node{Xoffset: 20}, PPARAMOUT),
+ true,
+ },
+ {
+ nodeWithClass(Node{Xoffset: 10}, PPARAMOUT),
+ nodeWithClass(Node{Xoffset: 20}, PPARAM),
+ true,
+ },
+ {
+ markUsed(nodeWithClass(Node{}, PAUTO)),
+ nodeWithClass(Node{}, PAUTO),
+ true,
+ },
+ {
+ nodeWithClass(Node{}, PAUTO),
+ markUsed(nodeWithClass(Node{}, PAUTO)),
+ false,
+ },
+ {
+ nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
+ nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
+ false,
+ },
+ {
+ nodeWithClass(Node{Type: typeWithPointers()}, PAUTO),
+ nodeWithClass(Node{Type: typeWithoutPointers()}, PAUTO),
+ true,
+ },
+ {
+ markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
+ nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
+ true,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{}, Name: &Name{}}, PAUTO),
+ markNeedZero(nodeWithClass(Node{Type: &types.Type{}}, PAUTO)),
+ false,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
+ false,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{Width: 2}, Name: &Name{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{Width: 1}, Name: &Name{}}, PAUTO),
+ true,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ true,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ false,
+ },
+ {
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ false,
+ },
+ }
+ for _, d := range testdata {
+ got := cmpstackvarlt(d.a, d.b)
+ if got != d.lt {
+ t.Errorf("want %#v < %#v", d.a, d.b)
+ }
+ // If we expect a < b to be true, check that b < a is false.
+ if d.lt && cmpstackvarlt(d.b, d.a) {
+ t.Errorf("unexpected %#v < %#v", d.b, d.a)
+ }
+ }
+}
+
+func TestStackvarSort(t *testing.T) {
+ inp := []*Node{
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
+ nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
+ markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
+ nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ }
+ want := []*Node{
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Xoffset: 0, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Xoffset: 10, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ nodeWithClass(Node{Xoffset: 20, Type: &types.Type{}, Sym: &types.Sym{}}, PFUNC),
+ markUsed(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
+ markNeedZero(nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO)),
+ nodeWithClass(Node{Type: &types.Type{Width: 2}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{Width: 1}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "abc"}}, PAUTO),
+ nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
+ nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
+ }
+ sort.Sort(byStackVar(inp))
+ if !reflect.DeepEqual(want, inp) {
+ t.Error("sort failed")
+ for i := range inp {
+ g := inp[i]
+ w := want[i]
+ eq := reflect.DeepEqual(w, g)
+ if !eq {
+ t.Log(i, w, g)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go
new file mode 100644
index 0000000..5218cd0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/phi.go
@@ -0,0 +1,538 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "container/heap"
+ "fmt"
+)
+
+// This file contains the algorithm to place phi nodes in a function.
+// For small functions, we use the algorithm of Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau.
+// https://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
+// For large functions, we use Sreedhar & Gao: A Linear Time Algorithm for Placing Φ-Nodes.
+// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.1979&rep=rep1&type=pdf
+
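+// smallBlocks is the block-count threshold for choosing a phi placement
+// algorithm: functions with at most this many blocks use the simple
+// (Braun et al.) algorithm, larger ones the Sreedhar & Gao algorithm.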
+const smallBlocks = 500
+
+const debugPhi = false
+
+// insertPhis finds all the places in the function where a phi is
+// necessary and inserts them.
+// Uses FwdRef ops to find all uses of variables, and s.defvars to find
+// all definitions.
+// Phi values are inserted, and all FwdRefs are changed to a Copy
+// of the appropriate phi or definition.
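+// For example, if a variable is assigned in both predecessors of a block,
+// its FwdRef in that block is resolved to a phi merging the two definitions,
+// while a FwdRef reached by only a single definition becomes a Copy of it.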
+// TODO: make this part of cmd/compile/internal/ssa somehow?
+func (s *state) insertPhis() {
+ if len(s.f.Blocks) <= smallBlocks {
+ sps := simplePhiState{s: s, f: s.f, defvars: s.defvars}
+ sps.insertPhis()
+ return
+ }
+ ps := phiState{s: s, f: s.f, defvars: s.defvars}
+ ps.insertPhis()
+}
+
+type phiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[*Node]*ssa.Value // defined variables at end of each block
+
+ varnum map[*Node]int32 // variable numbering
+
+ // properties of the dominator tree
+ idom []*ssa.Block // dominator parents
+ tree []domBlock // dominator child+sibling
+ level []int32 // level in dominator tree (0 = root or unreachable, 1 = children of root, ...)
+
+ // scratch locations
+ priq blockHeap // priority queue of blocks, higher level (toward leaves) = higher priority
+ q []*ssa.Block // inner loop queue
+ queued *sparseSet // has been put in q
+ hasPhi *sparseSet // has a phi
+ hasDef *sparseSet // has a write of the variable we're processing
+
+ // miscellaneous
+ placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
+}
+
+func (s *phiState) insertPhis() {
+ if debugPhi {
+ fmt.Println(s.f.String())
+ }
+
+ // Find all the variables for which we need to match up reads & writes.
+ // This step prunes any basic-block-only variables from consideration.
+ // Generate a numbering for these variables.
+ s.varnum = map[*Node]int32{}
+ var vars []*Node
+ var vartypes []*types.Type
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ var_ := v.Aux.(*Node)
+
+ // Optimization: look back 1 block for the definition.
+ if len(b.Preds) == 1 {
+ c := b.Preds[0].Block()
+ if w := s.defvars[c.ID][var_]; w != nil {
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ continue
+ }
+ }
+
+ if _, ok := s.varnum[var_]; ok {
+ continue
+ }
+ s.varnum[var_] = int32(len(vartypes))
+ if debugPhi {
+ fmt.Printf("var%d = %v\n", len(vartypes), var_)
+ }
+ vars = append(vars, var_)
+ vartypes = append(vartypes, v.Type)
+ }
+ }
+
+ if len(vartypes) == 0 {
+ return
+ }
+
+ // Find all definitions of the variables we need to process.
+ // defs[n] contains all the blocks in which variable number n is assigned.
+ defs := make([][]*ssa.Block, len(vartypes))
+ for _, b := range s.f.Blocks {
+ for var_ := range s.defvars[b.ID] { // TODO: encode defvars some other way (explicit ops)? make defvars[n] a slice instead of a map.
+ if n, ok := s.varnum[var_]; ok {
+ defs[n] = append(defs[n], b)
+ }
+ }
+ }
+
+ // Make dominator tree.
+ s.idom = s.f.Idom()
+ s.tree = make([]domBlock, s.f.NumBlocks())
+ for _, b := range s.f.Blocks {
+ p := s.idom[b.ID]
+ if p != nil {
+ s.tree[b.ID].sibling = s.tree[p.ID].firstChild
+ s.tree[p.ID].firstChild = b
+ }
+ }
+ // Compute levels in dominator tree.
+ // With parent pointers we can do a depth-first walk without
+ // any auxiliary storage.
+ s.level = make([]int32, s.f.NumBlocks())
+ b := s.f.Entry
+levels:
+ for {
+ if p := s.idom[b.ID]; p != nil {
+ s.level[b.ID] = s.level[p.ID] + 1
+ if debugPhi {
+ fmt.Printf("level %s = %d\n", b, s.level[b.ID])
+ }
+ }
+ if c := s.tree[b.ID].firstChild; c != nil {
+ b = c
+ continue
+ }
+ for {
+ if c := s.tree[b.ID].sibling; c != nil {
+ b = c
+ continue levels
+ }
+ b = s.idom[b.ID]
+ if b == nil {
+ break levels
+ }
+ }
+ }
+
+ // Allocate scratch locations.
+ s.priq.level = s.level
+ s.q = make([]*ssa.Block, 0, s.f.NumBlocks())
+ s.queued = newSparseSet(s.f.NumBlocks())
+ s.hasPhi = newSparseSet(s.f.NumBlocks())
+ s.hasDef = newSparseSet(s.f.NumBlocks())
+ s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid)
+
+ // Generate phi ops for each variable.
+ for n := range vartypes {
+ s.insertVarPhis(n, vars[n], defs[n], vartypes[n])
+ }
+
+ // Resolve FwdRefs to the correct write or phi.
+ s.resolveFwdRefs()
+
+ // Erase variable numbers stored in AuxInt fields of phi ops. They are no longer needed.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == ssa.OpPhi {
+ v.AuxInt = 0
+ }
+ }
+ }
+}
+
+func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *types.Type) {
+ priq := &s.priq
+ q := s.q
+ queued := s.queued
+ queued.clear()
+ hasPhi := s.hasPhi
+ hasPhi.clear()
+ hasDef := s.hasDef
+ hasDef.clear()
+
+ // Add defining blocks to priority queue.
+ for _, b := range defs {
+ priq.a = append(priq.a, b)
+ hasDef.add(b.ID)
+ if debugPhi {
+ fmt.Printf("def of var%d in %s\n", n, b)
+ }
+ }
+ heap.Init(priq)
+
+ // Visit blocks defining variable n, from deepest to shallowest.
+ for len(priq.a) > 0 {
+ currentRoot := heap.Pop(priq).(*ssa.Block)
+ if debugPhi {
+ fmt.Printf("currentRoot %s\n", currentRoot)
+ }
+ // Walk subtree below definition.
+ // Skip subtrees we've done in previous iterations.
+ // Find edges exiting tree dominated by definition (the dominance frontier).
+ // Insert phis at target blocks.
+ if queued.contains(currentRoot.ID) {
+ s.s.Fatalf("root already in queue")
+ }
+ q = append(q, currentRoot)
+ queued.add(currentRoot.ID)
+ for len(q) > 0 {
+ b := q[len(q)-1]
+ q = q[:len(q)-1]
+ if debugPhi {
+ fmt.Printf(" processing %s\n", b)
+ }
+
+ currentRootLevel := s.level[currentRoot.ID]
+ for _, e := range b.Succs {
+ c := e.Block()
+ // TODO: if the variable is dead at c, skip it.
+ if s.level[c.ID] > currentRootLevel {
+ // a D-edge, or an edge whose target is in currentRoot's subtree.
+ continue
+ }
+ if hasPhi.contains(c.ID) {
+ continue
+ }
+ // Add a phi to block c for variable n.
+ hasPhi.add(c.ID)
+ v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
+ // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
+ s.s.addNamedValue(var_, v)
+ for range c.Preds {
+ v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
+ }
+ if debugPhi {
+ fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
+ }
+ if !hasDef.contains(c.ID) {
+ // There's now a new definition of this variable in block c.
+ // Add it to the priority queue to explore.
+ heap.Push(priq, c)
+ hasDef.add(c.ID)
+ }
+ }
+
+ // Visit children if they have not been visited yet.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ if !queued.contains(c.ID) {
+ q = append(q, c)
+ queued.add(c.ID)
+ }
+ }
+ }
+ }
+}
+
+// resolveFwdRefs links all FwdRef uses up to their nearest dominating definition.
+func (s *phiState) resolveFwdRefs() {
+ // Do a depth-first walk of the dominator tree, keeping track
+ // of the most-recently-seen value for each variable.
+
+ // Map from variable ID to SSA value at the current point of the walk.
+ values := make([]*ssa.Value, len(s.varnum))
+ for i := range values {
+ values[i] = s.placeholder
+ }
+
+ // Stack of work to do.
+ type stackEntry struct {
+ b *ssa.Block // block to explore
+
+ // variable/value pair to reinstate on exit
+ n int32 // variable ID
+ v *ssa.Value
+
+ // Note: only one of b or n,v will be set.
+ }
+ var stk []stackEntry
+
+ stk = append(stk, stackEntry{b: s.f.Entry})
+ for len(stk) > 0 {
+ work := stk[len(stk)-1]
+ stk = stk[:len(stk)-1]
+
+ b := work.b
+ if b == nil {
+ // On exit from a block, this case will undo any assignments done below.
+ values[work.n] = work.v
+ continue
+ }
+
+ // Process phis as new defs. They come before FwdRefs in this block.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpPhi {
+ continue
+ }
+ n := int32(v.AuxInt)
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace a FwdRef op with the current incoming value for its variable.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ n := s.varnum[v.Aux.(*Node)]
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(values[n])
+ }
+
+ // Establish values for variables defined in b.
+ for var_, v := range s.defvars[b.ID] {
+ n, ok := s.varnum[var_]
+ if !ok {
+ // some variable not live across a basic block boundary.
+ continue
+ }
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace phi args in successors with the current incoming value.
+ for _, e := range b.Succs {
+ c, i := e.Block(), e.Index()
+ for j := len(c.Values) - 1; j >= 0; j-- {
+ v := c.Values[j]
+ if v.Op != ssa.OpPhi {
+ break // All phis will be at the end of the block during phi building.
+ }
+ // Only set arguments that have been resolved.
+ // For very wide CFGs, this significantly speeds up phi resolution.
+ // See golang.org/issue/8225.
+ if w := values[v.AuxInt]; w.Op != ssa.OpUnknown {
+ v.SetArg(i, w)
+ }
+ }
+ }
+
+ // Walk children in dominator tree.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ stk = append(stk, stackEntry{b: c})
+ }
+ }
+}
+
+// domBlock contains extra per-block information to record the dominator tree.
+type domBlock struct {
+ firstChild *ssa.Block // first child of block in dominator tree
+ sibling *ssa.Block // next child of parent in dominator tree
+}
+
+// A block heap is used as a priority queue to implement the PiggyBank
+// from Sreedhar and Gao. That paper uses an array which is better
+// asymptotically but worse in the common case when the PiggyBank
+// holds a sparse set of blocks.
+type blockHeap struct {
+ a []*ssa.Block // block IDs in heap
+ level []int32 // depth in dominator tree (static, used for determining priority)
+}
+
+func (h *blockHeap) Len() int { return len(h.a) }
+func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *blockHeap) Push(x interface{}) {
+ v := x.(*ssa.Block)
+ h.a = append(h.a, v)
+}
+func (h *blockHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[:n-1]
+ return x
+}
+func (h *blockHeap) Less(i, j int) bool {
+ return h.level[h.a[i].ID] > h.level[h.a[j].ID]
+}
+
+// TODO: stop walking the iterated dominance frontier when
+// the variable is dead. Maybe detect that by checking if the
+// node we're on is reverse dominated by all the reads?
+// Reverse dominated by the highest common successor of all the reads?
+
+// copy of ../ssa/sparseset.go
+// TODO: move this file to ../ssa, then use sparseSet there.
+type sparseSet struct {
+ dense []ssa.ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) contains(x ssa.ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ssa.ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+// Variant to use for small functions.
+type simplePhiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[*Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
+}
+
+func (s *simplePhiState) insertPhis() {
+ s.reachable = ssa.ReachableBlocks(s.f)
+
+ // Find FwdRef ops.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ s.fwdrefs = append(s.fwdrefs, v)
+ var_ := v.Aux.(*Node)
+ if _, ok := s.defvars[b.ID][var_]; !ok {
+ s.defvars[b.ID][var_] = v // treat FwdRefs as definitions.
+ }
+ }
+ }
+
+ var args []*ssa.Value
+
+loop:
+ for len(s.fwdrefs) > 0 {
+ v := s.fwdrefs[len(s.fwdrefs)-1]
+ s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
+ b := v.Block
+ var_ := v.Aux.(*Node)
+ if b == s.f.Entry {
+ // No variable should be live at entry.
+ s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
+ }
+ if !s.reachable[b.ID] {
+ // This block is dead.
+ // It doesn't matter what we use here as long as it is well-formed.
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ continue
+ }
+ // Find variable value on each predecessor.
+ args = args[:0]
+ for _, e := range b.Preds {
+ args = append(args, s.lookupVarOutgoing(e.Block(), v.Type, var_, v.Pos))
+ }
+
+ // Decide if we need a phi or not. We need a phi if there
+ // are two different args (which are both not v).
+ var w *ssa.Value
+ for _, a := range args {
+ if a == v {
+ continue // self-reference
+ }
+ if a == w {
+ continue // already have this witness
+ }
+ if w != nil {
+ // two witnesses, need a phi value
+ v.Op = ssa.OpPhi
+ v.AddArgs(args...)
+ v.Aux = nil
+ continue loop
+ }
+ w = a // save witness
+ }
+ if w == nil {
+ s.s.Fatalf("no witness for reachable phi %s", v)
+ }
+ // One witness. Make v a copy of w.
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ }
+}
+
+// lookupVarOutgoing finds the variable's value at the end of block b.
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *Node, line src.XPos) *ssa.Value {
+ for {
+ if v := s.defvars[b.ID][var_]; v != nil {
+ return v
+ }
+ // The variable is not defined by b and we haven't looked it up yet.
+ // If b has exactly one predecessor, loop to look it up there.
+ // Otherwise, give up and insert a new FwdRef and resolve it later.
+ if len(b.Preds) != 1 {
+ break
+ }
+ b = b.Preds[0].Block()
+ if !s.reachable[b.ID] {
+ // This is rare; it happens with oddly interleaved infinite loops in dead code.
+ // See issue 19783.
+ break
+ }
+ }
+ // Generate a FwdRef for the variable and return that.
+ v := b.NewValue0A(line, ssa.OpFwdRef, t, var_)
+ s.defvars[b.ID][var_] = v
+ s.s.addNamedValue(var_, v)
+ s.fwdrefs = append(s.fwdrefs, v)
+ return v
+}
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
new file mode 100644
index 0000000..a48173e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -0,0 +1,1321 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+// -live (aka -live=1): print liveness lists as code warnings at safe points
+// -live=2: print an assembly listing with liveness annotations
+//
+// Each level includes the earlier output as well.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "crypto/md5"
+ "fmt"
+ "strings"
+)
+
+// OpVarDef is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easily, OpVarDef is only needed for multi-word
+// variables satisfying isfat(n.Type). For simplicity though, buildssa
+// emits OpVarDef regardless of variable width.
+//
+// An 'OpVarDef x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The OpVarDef must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// OpVarDef x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// OpVarDef x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// OpVarDef x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the OpVarDef appears to have "overwritten" it.
+//
+// OpVarDef is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// OpVarKill is the opposite of OpVarDef: it marks a value as no longer needed,
+// even if its address has been taken. That is, an OpVarKill annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
+
+// TODO: get rid of OpVarKill here. It's useful for stack frame allocation
+// so the compiler can allocate two temps to the same location. It is no
+// longer needed here now that stack objects are implemented.
+
+// BlockEffects summarizes the liveness effects on an SSA block.
+type BlockEffects struct {
+ // Computed during Liveness.prologue using only the content of
+ // individual blocks:
+ //
+ // uevar: upward exposed variables (used before set in block)
+ // varkill: killed variables (set in block)
+ uevar bvec
+ varkill bvec
+
+ // Computed during Liveness.solve using control flow information:
+ //
+ // livein: variables live at block entry
+ // liveout: variables live at block exit
+ livein bvec
+ liveout bvec
+}
+
+// A collection of global state used by liveness analysis.
+type Liveness struct {
+ fn *Node
+ f *ssa.Func
+ vars []*Node
+ idx map[*Node]int32
+ stkptrsize int64
+
+ be []BlockEffects
+
+ // allUnsafe indicates that all points in this function are
+ // unsafe-points.
+ allUnsafe bool
+ // unsafePoints bit i is set if Value ID i is an unsafe-point
+ // (preemption is not allowed). Only valid if !allUnsafe.
+ unsafePoints bvec
+
+ // An array with a bit vector for each safe point in the
+ // current Block during Liveness.epilogue. Indexed in Value
+ // order for that block. Additionally, for the entry block
+ // livevars[0] is the entry bitmap. Liveness.compact moves
+ // these to stackMaps.
+ livevars []bvec
+
+ // livenessMap maps from safe points (i.e., CALLs) to their
+ // liveness map indexes.
+ livenessMap LivenessMap
+ stackMapSet bvecSet
+ stackMaps []bvec
+
+ cache progeffectscache
+}
+
+// LivenessMap maps from *ssa.Value to LivenessIndex.
+type LivenessMap struct {
+ vals map[ssa.ID]LivenessIndex
+ // The set of live, pointer-containing variables at the deferreturn
+ // call (only set when open-coded defers are used).
+ deferreturn LivenessIndex
+}
+
+func (m *LivenessMap) reset() {
+ if m.vals == nil {
+ m.vals = make(map[ssa.ID]LivenessIndex)
+ } else {
+ for k := range m.vals {
+ delete(m.vals, k)
+ }
+ }
+ m.deferreturn = LivenessDontCare
+}
+
+func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
+ m.vals[v.ID] = i
+}
+
+func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
+ // If v isn't in the map, then it's a "don't care" and not an
+ // unsafe-point.
+ if idx, ok := m.vals[v.ID]; ok {
+ return idx
+ }
+ return LivenessIndex{StackMapDontCare, false}
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+ stackMapIndex int
+
+ // isUnsafePoint indicates that this is an unsafe-point.
+ //
+ // Note that it's possible for a call Value to have a stack
+ // map while also being an unsafe-point. This means it cannot
+ // be preempted at this instruction, but that a preemption or
+ // stack growth may happen in the called function.
+ isUnsafePoint bool
+}
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+func (idx LivenessIndex) StackMapValid() bool {
+ return idx.stackMapIndex != StackMapDontCare
+}
+
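+// progeffectscache caches the indexes of the variables that are upward
+// exposed at ordinary returns (retuevar) and at tail calls (tailuevar).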
+type progeffectscache struct {
+ retuevar []int32
+ tailuevar []int32
+ initialized bool
+}
+
+// livenessShouldTrack reports whether the liveness analysis
+// should track the variable n.
+// We don't care about variables that have no pointers,
+// nor do we care about non-local variables,
+// nor do we care about empty structs (handled by the pointer check),
+// nor do we care about the fake PAUTOHEAP variables.
+func livenessShouldTrack(n *Node) bool {
+ return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
+}
+
+// getvariables returns the list of on-stack variables that we need to track
+// and a map for looking up indices by *Node.
+func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
+ var vars []*Node
+ for _, n := range fn.Func.Dcl {
+ if livenessShouldTrack(n) {
+ vars = append(vars, n)
+ }
+ }
+ idx := make(map[*Node]int32, len(vars))
+ for i, n := range vars {
+ idx[n] = int32(i)
+ }
+ return vars, idx
+}
+
+func (lv *Liveness) initcache() {
+ if lv.cache.initialized {
+ Fatalf("liveness cache initialized twice")
+ return
+ }
+ lv.cache.initialized = true
+
+ for i, node := range lv.vars {
+ switch node.Class() {
+ case PPARAM:
+ // A return instruction with a p.to is a tail return, which brings
+ // the stack pointer back up (if it ever went down) and then jumps
+ // to a new function entirely. That form of instruction must read
+ // all the parameters for correctness, and similarly it must not
+ // read the out arguments - they won't be set until the new
+ // function runs.
+ lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
+
+ case PPARAMOUT:
+ // All results are live at every return point.
+ // Note that this point is after escaping return values
+ // are copied back to the stack using their PAUTOHEAP references.
+ lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
+ }
+ }
+}
+
+// A liveEffect is a set of flags that describe an instruction's
+// liveness effects on a variable.
+//
+// The possible flags are:
+// uevar - used by the instruction
+// varkill - killed by the instruction (set)
+// A kill happens after the use (for an instruction that updates a value, for example).
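+// For example, an op that both reads and writes the same stack variable has
+// both effects; because the kill follows the use, the old value still counts
+// as used (upward exposed).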
+type liveEffect int
+
+const (
+ uevar liveEffect = 1 << iota
+ varkill
+)
+
+// valueEffects returns the index of a variable in lv.vars and the
+// liveness effects v has on that variable.
+// If v does not affect any tracked variables, it returns -1, 0.
+func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
+ n, e := affectedNode(v)
+ if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
+ return -1, 0
+ }
+
+ // AllocFrame has dropped unused variables from
+ // lv.fn.Func.Dcl, but they might still be referenced by
+ // OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
+ // variable" ICEs (issue 19632).
+ switch v.Op {
+ case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
+ if !n.Name.Used() {
+ return -1, 0
+ }
+ }
+
+ var effect liveEffect
+ // Read is a read, obviously.
+ //
+ // Addr is a read also, as any subsequent holder of the pointer must be able
+ // to see all the values (including initialization) written so far.
+ // This also prevents a variable from "coming back from the dead" and presenting
+ // stale pointers to the garbage collector. See issue 28445.
+ if e&(ssa.SymRead|ssa.SymAddr) != 0 {
+ effect |= uevar
+ }
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+ effect |= varkill
+ }
+
+ if effect == 0 {
+ return -1, 0
+ }
+
+ if pos, ok := lv.idx[n]; ok {
+ return pos, effect
+ }
+ return -1, 0
+}
+
+// affectedNode returns the *Node affected by v
+func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
+ // Special cases.
+ switch v.Op {
+ case ssa.OpLoadReg:
+ n, _ := AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ case ssa.OpStoreReg:
+ n, _ := AutoVar(v)
+ return n, ssa.SymWrite
+
+ case ssa.OpVarLive:
+ return v.Aux.(*Node), ssa.SymRead
+ case ssa.OpVarDef, ssa.OpVarKill:
+ return v.Aux.(*Node), ssa.SymWrite
+ case ssa.OpKeepAlive:
+ n, _ := AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ }
+
+ e := v.Op.SymEffect()
+ if e == 0 {
+ return nil, 0
+ }
+
+ switch a := v.Aux.(type) {
+ case nil, *obj.LSym:
+ // ok, but no node
+ return nil, e
+ case *Node:
+ return a, e
+ default:
+ Fatalf("weird aux: %s", v.LongString())
+ return nil, e
+ }
+}
+
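+// livenessFuncCache holds liveness allocations that are kept in the
+// ssa.Func's Cache and reused between functions to reduce allocation.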
+type livenessFuncCache struct {
+ be []BlockEffects
+ livenessMap LivenessMap
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation. The vars argument is a slice of *Nodes and idx maps
+// each of those nodes to its index in vars.
+func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
+ lv := &Liveness{
+ fn: fn,
+ f: f,
+ vars: vars,
+ idx: idx,
+ stkptrsize: stkptrsize,
+ }
+
+ // Significant sources of allocation are kept in the ssa.Cache
+ // and reused. Surprisingly, the bit vectors themselves aren't
+ // a major source of allocation, but the liveness maps are.
+ if lc, _ := f.Cache.Liveness.(*livenessFuncCache); lc == nil {
+ // Prep the cache so liveness can fill it later.
+ f.Cache.Liveness = new(livenessFuncCache)
+ } else {
+ if cap(lc.be) >= f.NumBlocks() {
+ lv.be = lc.be[:f.NumBlocks()]
+ }
+ lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
+ lc.livenessMap.vals = nil
+ }
+ if lv.be == nil {
+ lv.be = make([]BlockEffects, f.NumBlocks())
+ }
+
+ nblocks := int32(len(f.Blocks))
+ nvars := int32(len(vars))
+ bulk := bvbulkalloc(nvars, nblocks*7)
+ for _, b := range f.Blocks {
+ be := lv.blockEffects(b)
+
+ be.uevar = bulk.next()
+ be.varkill = bulk.next()
+ be.livein = bulk.next()
+ be.liveout = bulk.next()
+ }
+ lv.livenessMap.reset()
+
+ lv.markUnsafePoints()
+ return lv
+}
+
+func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
+ return &lv.be[b.ID]
+}
+
+// NOTE: The bitmap for a specific type t could be cached in t after
+// the first run and then simply copied into bv at the correct offset
+// on future calls with the same type t.
+func onebitwalktype1(t *types.Type, off int64, bv bvec) {
+ if t.Align > 0 && off&int64(t.Align-1) != 0 {
+ Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
+ }
+ if !t.HasPointers() {
+ // Note: this case ensures that pointers to go:notinheap types
+ // are not considered pointers by garbage collection and stack copying.
+ return
+ }
+
+ switch t.Etype {
+ case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ if off&int64(Widthptr-1) != 0 {
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(Widthptr))) // pointer
+
+ case TSTRING:
+ // struct { byte *str; intgo len; }
+ if off&int64(Widthptr-1) != 0 {
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(Widthptr))) // pointer in first slot
+
+ case TINTER:
+ // struct { Itab *tab; void *data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; void *data; }
+ if off&int64(Widthptr-1) != 0 {
+ Fatalf("onebitwalktype1: invalid alignment, %v", t)
+ }
+ // The first word of an interface is a pointer, but we don't
+ // treat it as such.
+ // 1. If it is a non-empty interface, the pointer points to an itab
+ // which is always in persistentalloc space.
+ // 2. If it is an empty interface, the pointer points to a _type.
+ // a. If it is a compile-time-allocated type, it points into
+ // the read-only data section.
+ // b. If it is a reflect-allocated type, it points into the Go heap.
+ // Reflect is responsible for keeping a reference to
+ // the underlying type so it won't be GCd.
+ // If we ever have a moving GC, we need to change this for 2b (as
+ // well as scan itabs to update their itab._type fields).
+ bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
+
+ case TSLICE:
+ // struct { byte *array; uintgo len; uintgo cap; }
+ if off&int64(Widthptr-1) != 0 {
+ Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
+
+ case TARRAY:
+ elt := t.Elem()
+ if elt.Width == 0 {
+ // Short-circuit for #20739.
+ break
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ onebitwalktype1(elt, off, bv)
+ off += elt.Width
+ }
+
+ case TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ onebitwalktype1(f.Type, off+f.Offset, bv)
+ }
+
+ default:
+ Fatalf("onebitwalktype1: unexpected type, %v", t)
+ }
+}
+
+// Generates live pointer value maps for arguments and local variables. The
+// receiver and the in arguments are always assumed live. The vars
+// argument is a slice of *Nodes.
+func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
+ for i := int32(0); ; i++ {
+ i = liveout.Next(i)
+ if i < 0 {
+ break
+ }
+ node := vars[i]
+ switch node.Class() {
+ case PAUTO:
+ onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
+
+ case PPARAM, PPARAMOUT:
+ onebitwalktype1(node.Type, node.Xoffset, args)
+ }
+ }
+}
+
+// allUnsafe reports whether all points in this function are
+// unsafe-points.
+func allUnsafe(f *ssa.Func) bool {
+ // The runtime assumes the only safe-points are function
+ // prologues (because that's how it used to be). We could and
+ // should improve that, but for now keep considering all points
+ // in the runtime unsafe. obj will add prologues and their
+ // safe-points.
+ //
+ // go:nosplit functions are similar. Since safe points used to
+ // be coupled with stack checks, go:nosplit often actually
+ // means "no safe points in this function".
+ return compiling_runtime || f.NoSplit
+}
+
+// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
+func (lv *Liveness) markUnsafePoints() {
+ if allUnsafe(lv.f) {
+ // No complex analysis necessary.
+ lv.allUnsafe = true
+ return
+ }
+
+ lv.unsafePoints = bvalloc(int32(lv.f.NumValues()))
+
+ // Mark architecture-specific unsafe points.
+ for _, b := range lv.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.UnsafePoint() {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+
+ // Mark write barrier unsafe points.
+ for _, wbBlock := range lv.f.WBLoads {
+ if wbBlock.Kind == ssa.BlockPlain && len(wbBlock.Values) == 0 {
+ // The write barrier block was optimized away
+ // but we haven't done dead block elimination.
+ // (This can happen in -N mode.)
+ continue
+ }
+ // Check that we have the expected diamond shape.
+ if len(wbBlock.Succs) != 2 {
+ lv.f.Fatalf("expected branch at write barrier block %v", wbBlock)
+ }
+ s0, s1 := wbBlock.Succs[0].Block(), wbBlock.Succs[1].Block()
+ if s0 == s1 {
+ // There's no difference between write barrier on and off.
+ // Thus there are no unsafe locations. See issue 26024.
+ continue
+ }
+ if s0.Kind != ssa.BlockPlain || s1.Kind != ssa.BlockPlain {
+ lv.f.Fatalf("expected successors of write barrier block %v to be plain", wbBlock)
+ }
+ if s0.Succs[0].Block() != s1.Succs[0].Block() {
+ lv.f.Fatalf("expected successors of write barrier block %v to converge", wbBlock)
+ }
+
+ // Flow backwards from the control value to find the
+ // flag load. We don't know what lowered ops we're
+ // looking for, but all current arches produce a
+ // single op that does the memory load from the flag
+ // address, so we look for that.
+ var load *ssa.Value
+ v := wbBlock.Controls[0]
+ for {
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == writeBarrier {
+ load = v
+ break
+ }
+ switch v.Op {
+ case ssa.Op386TESTL:
+ // 386 lowers Neq32 to (TESTL cond cond),
+ if v.Args[0] == v.Args[1] {
+ v = v.Args[0]
+ continue
+ }
+ case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U:
+ // Args[0] is the address of the write
+ // barrier control. Ignore Args[1],
+ // which is the mem operand.
+ // TODO: Just ignore mem operands?
+ v = v.Args[0]
+ continue
+ }
+ // Common case: just flow backwards.
+ if len(v.Args) != 1 {
+ v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
+ }
+ v = v.Args[0]
+ }
+
+ // Mark everything after the load unsafe.
+ found := false
+ for _, v := range wbBlock.Values {
+ found = found || v == load
+ if found {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+
+ // Mark the two successor blocks unsafe. These come
+ // back together immediately after the direct write in
+ // one successor and the last write barrier call in
+ // the other, so there's no need to be more precise.
+ for _, succ := range wbBlock.Succs {
+ for _, v := range succ.Block().Values {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+
+ // Find uintptr -> unsafe.Pointer conversions and flood
+ // unsafeness back to a call (which is always a safe point).
+ //
+ // Looking for the uintptr -> unsafe.Pointer conversion has a
+ // few advantages over looking for unsafe.Pointer -> uintptr
+ // conversions:
+ //
+ // 1. We avoid needlessly blocking safe-points for
+ // unsafe.Pointer -> uintptr conversions that never go back to
+ // a Pointer.
+ //
+ // 2. We don't have to detect calls to reflect.Value.Pointer,
+ // reflect.Value.UnsafeAddr, and reflect.Value.InterfaceData,
+ // which are implicit unsafe.Pointer -> uintptr conversions.
+ // We can't even reliably detect this if there's an indirect
+ // call to one of these methods.
+ //
+ // TODO: For trivial unsafe.Pointer arithmetic, it would be
+ // nice to only flood as far as the unsafe.Pointer -> uintptr
+ // conversion, but it's hard to know which argument of an Add
+ // or Sub to follow.
+ var flooded bvec
+ var flood func(b *ssa.Block, vi int)
+ flood = func(b *ssa.Block, vi int) {
+ if flooded.n == 0 {
+ flooded = bvalloc(int32(lv.f.NumBlocks()))
+ }
+ if flooded.Get(int32(b.ID)) {
+ return
+ }
+ for i := vi - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if v.Op.IsCall() {
+ // Uintptrs must not contain live
+ // pointers across calls, so stop
+ // flooding.
+ return
+ }
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ if vi == len(b.Values) {
+ // We marked all values in this block, so no
+ // need to flood this block again.
+ flooded.Set(int32(b.ID))
+ }
+ for _, pred := range b.Preds {
+ flood(pred.Block(), len(pred.Block().Values))
+ }
+ }
+ for _, b := range lv.f.Blocks {
+ for i, v := range b.Values {
+ if !(v.Op == ssa.OpConvert && v.Type.IsPtrShaped()) {
+ continue
+ }
+ // Flood the unsafe-ness of this backwards
+ // until we hit a call.
+ flood(b, i+1)
+ }
+ }
+}
+
+// Returns true for instructions that must have a stack map.
+//
+// This does not necessarily mean the instruction is a safe-point. In
+// particular, call Values can have a stack map in case the callee
+// grows the stack, but not themselves be a safe-point.
+func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
+ if !v.Op.IsCall() {
+ return false
+ }
+ // typedmemclr and typedmemmove are write barriers and
+ // deeply non-preemptible. They are unsafe points and
+ // hence should not have liveness maps.
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
+ return false
+ }
+ return true
+}
+
+// Initializes the sets for solving the live variables. Visits all the
+// instructions in each basic block to summarize the information at each basic
+// block.
+func (lv *Liveness) prologue() {
+ lv.initcache()
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk the block instructions backward and update the block
+ // effects with the effects of each instruction.
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ pos, e := lv.valueEffects(b.Values[j])
+ if e&varkill != 0 {
+ be.varkill.Set(pos)
+ be.uevar.Unset(pos)
+ }
+ if e&uevar != 0 {
+ be.uevar.Set(pos)
+ }
+ }
+ }
+}
+
+// Solve the liveness dataflow equations.
+func (lv *Liveness) solve() {
+ // These temporary bitvectors exist to avoid successive allocations and
+ // frees within the loop.
+ nvars := int32(len(lv.vars))
+ newlivein := bvalloc(nvars)
+ newliveout := bvalloc(nvars)
+
+ // Walk blocks in postorder ordering. This improves convergence.
+ po := lv.f.Postorder()
+
+ // Iterate through the blocks in reverse round-robin fashion. A work
+ // queue might be slightly faster. As is, the number of iterations is
+ // so low that it hardly seems to be worth the complexity.
+
+ for change := true; change; {
+ change = false
+ for _, b := range po {
+ be := lv.blockEffects(b)
+
+ newliveout.Clear()
+ switch b.Kind {
+ case ssa.BlockRet:
+ for _, pos := range lv.cache.retuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockRetJmp:
+ for _, pos := range lv.cache.tailuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockExit:
+ // panic exit - nothing to do
+ default:
+ // A variable is live on output from this block
+ // if it is live on input to some successor.
+ //
+ // out[b] = \bigcup_{s \in succ[b]} in[s]
+ newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein)
+ for _, succ := range b.Succs[1:] {
+ newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein)
+ }
+ }
+
+ if !be.liveout.Eq(newliveout) {
+ change = true
+ be.liveout.Copy(newliveout)
+ }
+
+ // A variable is live on input to this block
+ // if it is used by this block, or live on output from this block and
+ // not set by the code in this block.
+ //
+ // in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+ newlivein.AndNot(be.liveout, be.varkill)
+ be.livein.Or(newlivein, be.uevar)
+ }
+ }
+}
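
For readers new to liveness analysis, the following standalone sketch (not
compiler code; the block type and the map-of-names set representation are
invented for illustration) computes the same fixed point as the equations
above on a tiny three-block CFG:

	package main

	import "fmt"

	// block mirrors the per-block sets used above: uevar (used before being
	// set in the block), varkill (set in the block), and the livein/liveout
	// solution being computed.
	type block struct {
		succs   []*block
		uevar   map[string]bool
		varkill map[string]bool
		livein  map[string]bool
		liveout map[string]bool
	}

	// solve iterates
	//	out[b] = union of in[s] over successors s
	//	in[b]  = uevar[b] + (out[b] - varkill[b])
	// until nothing changes. The sets only grow, so comparing sizes is
	// enough to detect a change.
	func solve(blocks []*block) {
		for change := true; change; {
			change = false
			for _, b := range blocks { // the real pass walks blocks in postorder
				out := map[string]bool{}
				for _, s := range b.succs {
					for v := range s.livein {
						out[v] = true
					}
				}
				in := map[string]bool{}
				for v := range out {
					if !b.varkill[v] {
						in[v] = true
					}
				}
				for v := range b.uevar {
					in[v] = true
				}
				if len(in) != len(b.livein) || len(out) != len(b.liveout) {
					change = true
				}
				b.livein, b.liveout = in, out
			}
		}
	}

	func main() {
		// b1 assigns x and branches to b2 or b3; only b2 uses x.
		b2 := &block{uevar: map[string]bool{"x": true}, varkill: map[string]bool{}}
		b3 := &block{uevar: map[string]bool{}, varkill: map[string]bool{}}
		b1 := &block{succs: []*block{b2, b3}, uevar: map[string]bool{}, varkill: map[string]bool{"x": true}}
		solve([]*block{b1, b2, b3})
		fmt.Println(b1.liveout["x"], b1.livein["x"]) // true false: x is live out of b1 but killed by it
	}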
+
+// Visits all instructions in a basic block and computes a bit vector of live
+// variables at each safe point location.
+func (lv *Liveness) epilogue() {
+ nvars := int32(len(lv.vars))
+ liveout := bvalloc(nvars)
+ livedefer := bvalloc(nvars) // always-live variables
+
+ // If there is a defer (that could recover), then all output
+ // parameters are live all the time. In addition, any locals
+ // that are pointers to heap-allocated output parameters are
+ // also always live (post-deferreturn code needs these
+ // pointers to copy values back to the stack).
+ // TODO: if the output parameter is heap-allocated, then we
+ // don't need to keep the stack copy live?
+ if lv.fn.Func.HasDefer() {
+ for i, n := range lv.vars {
+ if n.Class() == PPARAMOUT {
+ if n.Name.IsOutputParamHeapAddr() {
+ // Just to be paranoid. Heap addresses are PAUTOs.
+ Fatalf("variable %v both output param and heap output param", n)
+ }
+ if n.Name.Param.Heapaddr != nil {
+ // If this variable moved to the heap, then
+ // its stack copy is not live.
+ continue
+ }
+ // Note: zeroing is handled by zeroResults in walk.go.
+ livedefer.Set(int32(i))
+ }
+ if n.Name.IsOutputParamHeapAddr() {
+ // This variable will be overwritten early in the function
+ // prologue (from the result of a mallocgc) but we need to
+ // zero it in case that malloc causes a stack scan.
+ n.Name.SetNeedzero(true)
+ livedefer.Set(int32(i))
+ }
+ if n.Name.OpenDeferSlot() {
+ // Open-coded defer args slots must be live
+ // everywhere in a function, since a panic can
+ // occur (almost) anywhere. Because it is live
+ // everywhere, it must be zeroed on entry.
+ livedefer.Set(int32(i))
+ // It was already marked as Needzero when created.
+ if !n.Name.Needzero() {
+ Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ }
+ }
+ }
+ }
+
+ // We must analyze the entry block first. The runtime assumes
+ // the function entry map is index 0. Conveniently, layout
+ // already ensured that the entry block is first.
+ if lv.f.Entry != lv.f.Blocks[0] {
+ lv.f.Fatalf("entry block must be first")
+ }
+
+ {
+ // Reserve an entry for function entry.
+ live := bvalloc(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk forward through the basic block instructions and
+ // allocate liveness maps for those instructions that need them.
+ for _, v := range b.Values {
+ if !lv.hasStackMap(v) {
+ continue
+ }
+
+ live := bvalloc(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ // walk backward, construct maps at each safe point
+ index := int32(len(lv.livevars) - 1)
+
+ liveout.Copy(be.liveout)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+
+ if lv.hasStackMap(v) {
+ // Found an interesting instruction, record the
+ // corresponding liveness information.
+
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ live.Or(*live, livedefer) // only for non-entry safe points
+ index--
+ }
+
+ // Update liveness information.
+ pos, e := lv.valueEffects(v)
+ if e&varkill != 0 {
+ liveout.Unset(pos)
+ }
+ if e&uevar != 0 {
+ liveout.Set(pos)
+ }
+ }
+
+ if b == lv.f.Entry {
+ if index != 0 {
+ Fatalf("bad index for entry point: %v", index)
+ }
+
+ // Check to make sure only input variables are live.
+ for i, n := range lv.vars {
+ if !liveout.Get(int32(i)) {
+ continue
+ }
+ if n.Class() == PPARAM {
+ continue // ok
+ }
+ Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+ }
+
+ // Record live variables.
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ }
+
+ // The liveness maps for this block are now complete. Compact them.
+ lv.compact(b)
+ }
+
+ // If we have an open-coded deferreturn call, make a liveness map for it.
+ if lv.fn.Func.OpenCodedDeferDisallowed() {
+ lv.livenessMap.deferreturn = LivenessDontCare
+ } else {
+ lv.livenessMap.deferreturn = LivenessIndex{
+ stackMapIndex: lv.stackMapSet.add(livedefer),
+ isUnsafePoint: false,
+ }
+ }
+
+ // Done compacting. Throw out the stack map set.
+ lv.stackMaps = lv.stackMapSet.extractUniqe()
+ lv.stackMapSet = bvecSet{}
+
+ // Useful sanity check: on entry to the function,
+ // the only things that can possibly be live are the
+ // input parameters.
+ for j, n := range lv.vars {
+ if n.Class() != PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
+ }
+ }
+}
+
+// Compact coalesces identical bitmaps from lv.livevars into the sets
+// lv.stackMapSet.
+//
+// Compact clears lv.livevars.
+//
+// There are actually two lists of bitmaps, one list for the local variables and one
+// list for the function arguments. Both lists are indexed by the same PCDATA
+// index, so the corresponding pairs must be considered together when
+// merging duplicates. The argument bitmaps change much less often during
+// function execution than the local variable bitmaps, so it is possible that
+// we could introduce a separate PCDATA index for arguments vs locals and
+// then compact the set of argument bitmaps separately from the set of
+// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
+// is actually a net loss: we save about 50k of argument bitmaps but the new
+// PCDATA tables cost about 100k. So for now we keep using a single index for
+// both bitmap lists.
+func (lv *Liveness) compact(b *ssa.Block) {
+ pos := 0
+ if b == lv.f.Entry {
+ // Handle entry stack map.
+ lv.stackMapSet.add(lv.livevars[0])
+ pos++
+ }
+ for _, v := range b.Values {
+ hasStackMap := lv.hasStackMap(v)
+ isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
+ idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
+ if hasStackMap {
+ idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
+ pos++
+ }
+ if hasStackMap || isUnsafePoint {
+ lv.livenessMap.set(v, idx)
+ }
+ }
+
+ // Reset livevars.
+ lv.livevars = lv.livevars[:0]
+}
+
+func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
+ if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
+ return
+ }
+ if !(v == nil || v.Op.IsCall()) {
+ // Historically we only printed this information at
+ // calls. Keep doing so.
+ return
+ }
+ if live.IsEmpty() {
+ return
+ }
+
+ pos := lv.fn.Func.Nname.Pos
+ if v != nil {
+ pos = v.Pos
+ }
+
+ s := "live at "
+ if v == nil {
+ s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
+ } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ fn := sym.Fn.Name
+ if pos := strings.Index(fn, "."); pos >= 0 {
+ fn = fn[pos+1:]
+ }
+ s += fmt.Sprintf("call to %s:", fn)
+ } else {
+ s += "indirect call:"
+ }
+
+ for j, n := range lv.vars {
+ if live.Get(int32(j)) {
+ s += fmt.Sprintf(" %v", n)
+ }
+ }
+
+ Warnl(pos, s)
+}
+
+func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
+ if live.IsEmpty() {
+ return printed
+ }
+
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+
+ comma := ""
+ for i, n := range lv.vars {
+ if !live.Get(int32(i)) {
+ continue
+ }
+ fmt.Printf("%s%s", comma, n.Sym.Name)
+ comma = ","
+ }
+ return true
+}
+
+// printeffect is like printbvec, but for valueEffects.
+func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
+ if !x {
+ return printed
+ }
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+ if x {
+ fmt.Printf("%s", lv.vars[pos].Sym.Name)
+ }
+
+ return true
+}
+
+// Prints the computed liveness information and inputs, for debugging.
+// This format synthesizes the information used during the multiple passes
+// into a single presentation.
+func (lv *Liveness) printDebug() {
+ fmt.Printf("liveness: %s\n", lv.fn.funcname())
+
+ for i, b := range lv.f.Blocks {
+ if i > 0 {
+ fmt.Printf("\n")
+ }
+
+ // bb#0 pred=1,2 succ=3,4
+ fmt.Printf("bb#%d pred=", b.ID)
+ for j, pred := range b.Preds {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", pred.Block().ID)
+ }
+ fmt.Printf(" succ=")
+ for j, succ := range b.Succs {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", succ.Block().ID)
+ }
+ fmt.Printf("\n")
+
+ be := lv.blockEffects(b)
+
+ // initial settings
+ printed := false
+ printed = lv.printbvec(printed, "uevar", be.uevar)
+ printed = lv.printbvec(printed, "livein", be.livein)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ // program listing, with individual effects listed
+
+ if b == lv.f.Entry {
+ live := lv.stackMaps[0]
+ fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
+ fmt.Printf("\tlive=")
+ printed = false
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ fmt.Printf("\n")
+ }
+
+ for _, v := range b.Values {
+ fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
+
+ pcdata := lv.livenessMap.Get(v)
+
+ pos, effect := lv.valueEffects(v)
+ printed = false
+ printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
+ printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ if pcdata.StackMapValid() {
+ fmt.Printf("\tlive=")
+ printed = false
+ if pcdata.StackMapValid() {
+ live := lv.stackMaps[pcdata.stackMapIndex]
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ }
+ fmt.Printf("\n")
+ }
+
+ if pcdata.isUnsafePoint {
+ fmt.Printf("\tunsafe-point\n")
+ }
+ }
+
+ // bb bitsets
+ fmt.Printf("end\n")
+ printed = false
+ printed = lv.printbvec(printed, "varkill", be.varkill)
+ printed = lv.printbvec(printed, "liveout", be.liveout)
+ if printed {
+ fmt.Printf("\n")
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
+// first word dumped is the total number of bitmaps. The second word is the
+// length of the bitmaps. All bitmaps are assumed to be of equal length. The
+// remaining bytes are the raw bitmaps.
+func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
+ // Size args bitmaps to be just large enough to hold the largest pointer.
+ // First, find the largest Xoffset node we care about.
+ // (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
+ var maxArgNode *Node
+ for _, n := range lv.vars {
+ switch n.Class() {
+ case PPARAM, PPARAMOUT:
+ if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
+ maxArgNode = n
+ }
+ }
+ }
+ // Next, find the offset of the largest pointer in the largest node.
+ var maxArgs int64
+ if maxArgNode != nil {
+ maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
+ }
+
+ // Size locals bitmaps to be stkptrsize sized.
+ // We cannot shrink them to only hold the largest pointer,
+ // because their size is used to calculate the beginning
+ // of the local variables frame.
+ // Further discussion in https://golang.org/cl/104175.
+ // TODO: consider trimming leading zeros.
+ // This would require shifting all bitmaps.
+ maxLocals := lv.stkptrsize
+
+ // Temporary symbols for encoding bitmaps.
+ var argsSymTmp, liveSymTmp obj.LSym
+
+ args := bvalloc(int32(maxArgs / int64(Widthptr)))
+ aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap
+
+ locals := bvalloc(int32(maxLocals / int64(Widthptr)))
+ loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap
+
+ for _, live := range lv.stackMaps {
+ args.Clear()
+ locals.Clear()
+
+ lv.pointerMap(live, lv.vars, args, locals)
+
+ aoff = dbvec(&argsSymTmp, aoff, args)
+ loff = dbvec(&liveSymTmp, loff, locals)
+ }
+
+ // Give these LSyms content-addressable names,
+ // so that they can be de-duplicated.
+ // This provides significant binary size savings.
+ //
+ // These symbols will be added to Ctxt.Data by addGCLocals
+ // after parallel compilation is done.
+ makeSym := func(tmpSym *obj.LSym) *obj.LSym {
+ return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
+ lsym.P = tmpSym.P
+ lsym.Set(obj.AttrContentAddressable, true)
+ })
+ }
+ return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
+}
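
As a sanity check on the layout described above, here is a minimal decoder
sketch (illustrative only, not compiler or runtime code; it assumes a
little-endian target and exactly the count/nbits/raw-bytes layout that emit
writes):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// decodeGCLocals splits a gclocals blob into its bitmaps: a uint32
	// bitmap count, a uint32 bit length, then count bitmaps of
	// ceil(nbits/8) raw bytes each.
	func decodeGCLocals(p []byte) (nbits uint32, maps [][]byte) {
		count := binary.LittleEndian.Uint32(p[0:4])
		nbits = binary.LittleEndian.Uint32(p[4:8])
		nbytes := int(nbits+7) / 8
		off := 8
		for i := uint32(0); i < count; i++ {
			maps = append(maps, p[off:off+nbytes])
			off += nbytes
		}
		return nbits, maps
	}

	func main() {
		// Two 3-bit maps: {bit0, bit2} and {bit1}.
		blob := []byte{2, 0, 0, 0, 3, 0, 0, 0, 0b101, 0b010}
		nbits, maps := decodeGCLocals(blob)
		fmt.Println(nbits, maps) // 3 [[5] [2]]
	}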
+
+// Entry point for liveness analysis. Solves for the liveness of
+// pointer variables in the function and emits a runtime data
+// structure read by the garbage collector.
+// Returns a map from GC safe points to their corresponding stack map index.
+func liveness(e *ssafn, f *ssa.Func, pp *Progs) LivenessMap {
+ // Construct the global liveness state.
+ vars, idx := getvariables(e.curfn)
+ lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
+
+ // Run the dataflow framework.
+ lv.prologue()
+ lv.solve()
+ lv.epilogue()
+ if debuglive > 0 {
+ lv.showlive(nil, lv.stackMaps[0])
+ for _, b := range f.Blocks {
+ for _, val := range b.Values {
+ if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
+ lv.showlive(val, lv.stackMaps[idx.stackMapIndex])
+ }
+ }
+ }
+ }
+ if debuglive >= 2 {
+ lv.printDebug()
+ }
+
+ // Update the function cache.
+ {
+ cache := f.Cache.Liveness.(*livenessFuncCache)
+ if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
+ for i := range lv.be {
+ lv.be[i] = BlockEffects{}
+ }
+ cache.be = lv.be
+ }
+ if len(lv.livenessMap.vals) < 2000 {
+ cache.livenessMap = lv.livenessMap
+ }
+ }
+
+ // Emit the live pointer map data structures
+ ls := e.curfn.Func.lsym
+ fninfo := ls.Func()
+ fninfo.GCArgs, fninfo.GCLocals = lv.emit()
+
+ p := pp.Prog(obj.AFUNCDATA)
+ Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCArgs
+
+ p = pp.Prog(obj.AFUNCDATA)
+ Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCLocals
+
+ return lv.livenessMap
+}
+
+// isfat reports whether a variable of type t needs multiple assignments to initialize.
+// For example:
+//
+// type T struct { x, y int }
+// x := T{x: 0, y: 1}
+//
+// Then we need:
+//
+// var t T
+// t.x = 0
+// t.y = 1
+//
+// to fully initialize t.
+func isfat(t *types.Type) bool {
+ if t != nil {
+ switch t.Etype {
+ case TSLICE, TSTRING,
+ TINTER: // maybe remove later
+ return true
+ case TARRAY:
+ // Array of 1 element, check if element is fat
+ if t.NumElem() == 1 {
+ return isfat(t.Elem())
+ }
+ return true
+ case TSTRUCT:
+ // Struct with 1 field, check if field is fat
+ if t.NumFields() == 1 {
+ return isfat(t.Field(0).Type)
+ }
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/src/cmd/compile/internal/gc/pprof.go b/src/cmd/compile/internal/gc/pprof.go
new file mode 100644
index 0000000..256c659
--- /dev/null
+++ b/src/cmd/compile/internal/gc/pprof.go
@@ -0,0 +1,13 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package gc
+
+import "runtime"
+
+func startMutexProfiling() {
+ runtime.SetMutexProfileFraction(1)
+}
diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go
new file mode 100644
index 0000000..3552617
--- /dev/null
+++ b/src/cmd/compile/internal/gc/racewalk.go
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+// The racewalk pass is currently handled in three parts.
+//
+// First, for flag_race, it inserts calls to racefuncenter and
+// racefuncexit at the start and end (respectively) of each
+// function. This is handled below.
+//
+// Second, during buildssa, it inserts appropriate instrumentation
+// calls immediately before each memory load or store. This is handled
+// by the (*state).instrument method in ssa.go, so here we just set
+// the Func.InstrumentBody flag as needed. For background on why this
+// is done during SSA construction rather than a separate SSA pass,
+// see issue #19054.
+//
+// Third, we remove calls to racefuncenter and racefuncexit for leaf
+// functions without instrumented operations. This is done as part of
+// the ssa opt pass via a special rule.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var omit_pkgs = []string{
+ "runtime/internal/atomic",
+ "runtime/internal/sys",
+ "runtime/internal/math",
+ "runtime",
+ "runtime/race",
+ "runtime/msan",
+ "internal/cpu",
+}
+
+// Don't insert racefuncenterfp/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var norace_inst_pkgs = []string{"sync", "sync/atomic"}
+
+func ispkgin(pkgs []string) bool {
+ if myimportpath != "" {
+ for _, p := range pkgs {
+ if myimportpath == p {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func instrument(fn *Node) {
+ if fn.Func.Pragma&Norace != 0 {
+ return
+ }
+
+ if !flag_race || !ispkgin(norace_inst_pkgs) {
+ fn.Func.SetInstrumentBody(true)
+ }
+
+ if flag_race {
+ lno := lineno
+ lineno = src.NoXPos
+
+ if thearch.LinkArch.Arch.Family != sys.AMD64 {
+ fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+ fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ } else {
+
+ // nodpc is the PC of the caller as extracted by
+ // getcallerpc. We use -widthptr(FP) for x86.
+ // This only works for amd64. This will not
+ // work on arm or others that might support
+ // race in the future.
+ nodpc := nodfp.copy()
+ nodpc.Type = types.Types[TUINTPTR]
+ nodpc.Xoffset = int64(-Widthptr)
+ fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
+ fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+ fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ }
+ lineno = lno
+ }
+}
diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go
new file mode 100644
index 0000000..1b4d765
--- /dev/null
+++ b/src/cmd/compile/internal/gc/range.go
@@ -0,0 +1,628 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+ "unicode/utf8"
+)
+
+// range
+func typecheckrange(n *Node) {
+ // Typechecking order is important here:
+ // 0. first typecheck range expression (slice/map/chan),
+ // it is evaluated only once and so logically it is not part of the loop.
+ // 1. typecheck produced values,
+ // this part can declare new vars and so it must be typechecked before body,
+ // because body can contain a closure that captures the vars.
+ // 2. decldepth++ to denote loop body.
+ // 3. typecheck body.
+ // 4. decldepth--.
+ typecheckrangeExpr(n)
+
+ // second half of dance, the first half being typecheckrangeExpr
+ n.SetTypecheck(1)
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ if n1.Typecheck() == 0 {
+ ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
+ }
+ }
+
+ decldepth++
+ typecheckslice(n.Nbody.Slice(), ctxStmt)
+ decldepth--
+}
+
+func typecheckrangeExpr(n *Node) {
+ n.Right = typecheck(n.Right, ctxExpr)
+
+ t := n.Right.Type
+ if t == nil {
+ return
+ }
+ // delicate little dance. see typecheckas2
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ if n1.Name == nil || n1.Name.Defn != n {
+ ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
+ }
+ }
+
+ if t.IsPtr() && t.Elem().IsArray() {
+ t = t.Elem()
+ }
+ n.Type = t
+
+ var t1, t2 *types.Type
+ toomany := false
+ switch t.Etype {
+ default:
+ yyerrorl(n.Pos, "cannot range over %L", n.Right)
+ return
+
+ case TARRAY, TSLICE:
+ t1 = types.Types[TINT]
+ t2 = t.Elem()
+
+ case TMAP:
+ t1 = t.Key()
+ t2 = t.Elem()
+
+ case TCHAN:
+ if !t.ChanDir().CanRecv() {
+ yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+ return
+ }
+
+ t1 = t.Elem()
+ t2 = nil
+ if n.List.Len() == 2 {
+ toomany = true
+ }
+
+ case TSTRING:
+ t1 = types.Types[TINT]
+ t2 = types.Runetype
+ }
+
+ if n.List.Len() > 2 || toomany {
+ yyerrorl(n.Pos, "too many variables in range")
+ }
+
+ var v1, v2 *Node
+ if n.List.Len() != 0 {
+ v1 = n.List.First()
+ }
+ if n.List.Len() > 1 {
+ v2 = n.List.Second()
+ }
+
+ // this is not only an optimization but also a requirement in the spec.
+ // "if the second iteration variable is the blank identifier, the range
+ // clause is equivalent to the same clause with only the first variable
+ // present."
+ if v2.isBlank() {
+ if v1 != nil {
+ n.List.Set1(v1)
+ }
+ v2 = nil
+ }
+
+ if v1 != nil {
+ if v1.Name != nil && v1.Name.Defn == n {
+ v1.Type = t1
+ } else if v1.Type != nil {
+ if op, why := assignop(t1, v1.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ }
+ }
+ checkassign(n, v1)
+ }
+
+ if v2 != nil {
+ if v2.Name != nil && v2.Name.Defn == n {
+ v2.Type = t2
+ } else if v2.Type != nil {
+ if op, why := assignop(t2, v2.Type); op == OXXX {
+ yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ }
+ }
+ checkassign(n, v2)
+ }
+}
+
+func cheapComputableIndex(width int64) bool {
+ switch thearch.LinkArch.Family {
+ // MIPS does not have R+R addressing
+ // Arm64 may lack ability to generate this code in our assembler,
+ // but the architecture supports it.
+ case sys.PPC64, sys.S390X:
+ return width == 1
+ case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+ switch width {
+ case 1, 2, 4, 8:
+ return true
+ }
+ }
+ return false
+}
+
+// walkrange transforms various forms of ORANGE into
+// simpler forms. The result must be assigned back to n.
+// Node n may also be modified in place, and may also be
+// the returned node.
+func walkrange(n *Node) *Node {
+ if isMapClear(n) {
+ m := n.Right
+ lno := setlineno(m)
+ n = mapClear(m)
+ lineno = lno
+ return n
+ }
+
+ // variable name conventions:
+ // ohv1, hv1, hv2: hidden (old) val 1, 2
+ // ha, hit: hidden aggregate, iterator
+ // hn, hp: hidden len, pointer
+ // hb: hidden bool
+ // a, v1, v2: not hidden aggregate, val 1, 2
+
+ t := n.Type
+
+ a := n.Right
+ lno := setlineno(a)
+ n.Right = nil
+
+ var v1, v2 *Node
+ l := n.List.Len()
+ if l > 0 {
+ v1 = n.List.First()
+ }
+
+ if l > 1 {
+ v2 = n.List.Second()
+ }
+
+ if v2.isBlank() {
+ v2 = nil
+ }
+
+ if v1.isBlank() && v2 == nil {
+ v1 = nil
+ }
+
+ if v1 == nil && v2 != nil {
+ Fatalf("walkrange: v2 != nil while v1 == nil")
+ }
+
+ // n.List has no meaning anymore, clear it
+ // to avoid erroneous processing by racewalk.
+ n.List.Set(nil)
+
+ var ifGuard *Node
+
+ translatedLoopOp := OFOR
+
+ var body []*Node
+ var init []*Node
+ switch t.Etype {
+ default:
+ Fatalf("walkrange")
+
+ case TARRAY, TSLICE:
+ if arrayClear(n, v1, v2, a) {
+ lineno = lno
+ return n
+ }
+
+ // order.stmt arranged for a copy of the array/slice variable if needed.
+ ha := a
+
+ hv1 := temp(types.Types[TINT])
+ hn := temp(types.Types[TINT])
+
+ init = append(init, nod(OAS, hv1, nil))
+ init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
+
+ n.Left = nod(OLT, hv1, hn)
+ n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
+
+ // for range ha { body }
+ if v1 == nil {
+ break
+ }
+
+ // for v1 := range ha { body }
+ if v2 == nil {
+ body = []*Node{nod(OAS, v1, hv1)}
+ break
+ }
+
+ // for v1, v2 := range ha { body }
+ if cheapComputableIndex(n.Type.Elem().Width) {
+ // v1, v2 = hv1, ha[hv1]
+ tmp := nod(OINDEX, ha, hv1)
+ tmp.SetBounded(true)
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
+ a := nod(OAS2, nil, nil)
+ a.List.Set2(v1, v2)
+ a.Rlist.Set2(hv1, tmp)
+ body = []*Node{a}
+ break
+ }
+
+ // TODO(austin): OFORUNTIL is a strange beast, but is
+ // necessary for expressing the control flow we need
+ // while also making "break" and "continue" work. It
+ // would be nice to just lower ORANGE during SSA, but
+ // racewalk needs to see many of the operations
+ // involved in ORANGE's implementation. If racewalk
+ // moves into SSA, consider moving ORANGE into SSA and
+ // eliminating OFORUNTIL.
+
+ // TODO(austin): OFORUNTIL inhibits bounds-check
+ // elimination on the index variable (see #20711).
+ // Enhance the prove pass to understand this.
+ ifGuard = nod(OIF, nil, nil)
+ ifGuard.Left = nod(OLT, hv1, hn)
+ translatedLoopOp = OFORUNTIL
+
+ hp := temp(types.NewPtr(n.Type.Elem()))
+ tmp := nod(OINDEX, ha, nodintconst(0))
+ tmp.SetBounded(true)
+ init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
+
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
+ a := nod(OAS2, nil, nil)
+ a.List.Set2(v1, v2)
+ a.Rlist.Set2(hv1, nod(ODEREF, hp, nil))
+ body = append(body, a)
+
+ // Advance pointer as part of the late increment.
+ //
+ // This runs *after* the condition check, so we know
+ // advancing the pointer is safe and won't go past the
+ // end of the allocation.
+ a = nod(OAS, hp, addptr(hp, t.Elem().Width))
+ a = typecheck(a, ctxStmt)
+ n.List.Set1(a)
+
+ case TMAP:
+ // order.stmt allocated the iterator for us.
+ // we only use a once, so no copy needed.
+ ha := a
+
+ hit := prealloc[n]
+ th := hit.Type
+ n.Left = nil
+ keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
+ elemsym := th.Field(1).Sym // ditto
+
+ fn := syslook("mapiterinit")
+
+ fn = substArgTypes(fn, t.Key(), t.Elem(), th)
+ init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
+ n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
+
+ fn = syslook("mapiternext")
+ fn = substArgTypes(fn, th)
+ n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
+
+ key := nodSym(ODOT, hit, keysym)
+ key = nod(ODEREF, key, nil)
+ if v1 == nil {
+ body = nil
+ } else if v2 == nil {
+ body = []*Node{nod(OAS, v1, key)}
+ } else {
+ elem := nodSym(ODOT, hit, elemsym)
+ elem = nod(ODEREF, elem, nil)
+ a := nod(OAS2, nil, nil)
+ a.List.Set2(v1, v2)
+ a.Rlist.Set2(key, elem)
+ body = []*Node{a}
+ }
+
+ case TCHAN:
+ // order.stmt arranged for a copy of the channel variable.
+ ha := a
+
+ n.Left = nil
+
+ hv1 := temp(t.Elem())
+ hv1.SetTypecheck(1)
+ if t.Elem().HasPointers() {
+ init = append(init, nod(OAS, hv1, nil))
+ }
+ hb := temp(types.Types[TBOOL])
+
+ n.Left = nod(ONE, hb, nodbool(false))
+ a := nod(OAS2RECV, nil, nil)
+ a.SetTypecheck(1)
+ a.List.Set2(hv1, hb)
+ a.Right = nod(ORECV, ha, nil)
+ n.Left.Ninit.Set1(a)
+ if v1 == nil {
+ body = nil
+ } else {
+ body = []*Node{nod(OAS, v1, hv1)}
+ }
+ // Zero hv1. This prevents hv1 from being the sole, inaccessible
+ // reference to an otherwise GC-able value during the next channel receive.
+ // See issue 15281.
+ body = append(body, nod(OAS, hv1, nil))
+
+ case TSTRING:
+ // Transform string range statements like "for v1, v2 = range a" into
+ //
+ // ha := a
+ // for hv1 := 0; hv1 < len(ha); {
+ // hv1t := hv1
+ // hv2 := rune(ha[hv1])
+ // if hv2 < utf8.RuneSelf {
+ // hv1++
+ // } else {
+ // hv2, hv1 = decoderune(ha, hv1)
+ // }
+ // v1, v2 = hv1t, hv2
+ // // original body
+ // }
+
+ // order.stmt arranged for a copy of the string variable.
+ ha := a
+
+ hv1 := temp(types.Types[TINT])
+ hv1t := temp(types.Types[TINT])
+ hv2 := temp(types.Runetype)
+
+ // hv1 := 0
+ init = append(init, nod(OAS, hv1, nil))
+
+ // hv1 < len(ha)
+ n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
+
+ if v1 != nil {
+ // hv1t = hv1
+ body = append(body, nod(OAS, hv1t, hv1))
+ }
+
+ // hv2 := rune(ha[hv1])
+ nind := nod(OINDEX, ha, hv1)
+ nind.SetBounded(true)
+ body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))
+
+ // if hv2 < utf8.RuneSelf
+ nif := nod(OIF, nil, nil)
+ nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))
+
+ // hv1++
+ nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
+
+ // } else {
+ eif := nod(OAS2, nil, nil)
+ nif.Rlist.Set1(eif)
+
+ // hv2, hv1 = decoderune(ha, hv1)
+ eif.List.Set2(hv2, hv1)
+ fn := syslook("decoderune")
+ eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
+
+ body = append(body, nif)
+
+ if v1 != nil {
+ if v2 != nil {
+ // v1, v2 = hv1t, hv2
+ a := nod(OAS2, nil, nil)
+ a.List.Set2(v1, v2)
+ a.Rlist.Set2(hv1t, hv2)
+ body = append(body, a)
+ } else {
+ // v1 = hv1t
+ body = append(body, nod(OAS, v1, hv1t))
+ }
+ }
+ }
+
+ n.Op = translatedLoopOp
+ typecheckslice(init, ctxStmt)
+
+ if ifGuard != nil {
+ ifGuard.Ninit.Append(init...)
+ ifGuard = typecheck(ifGuard, ctxStmt)
+ } else {
+ n.Ninit.Append(init...)
+ }
+
+ typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ n.Right = typecheck(n.Right, ctxStmt)
+ typecheckslice(body, ctxStmt)
+ n.Nbody.Prepend(body...)
+
+ if ifGuard != nil {
+ ifGuard.Nbody.Set1(n)
+ n = ifGuard
+ }
+
+ n = walkstmt(n)
+
+ lineno = lno
+ return n
+}
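
As a rough illustration of the TARRAY/TSLICE case above when the element is
not cheaply indexable, the guarded OFORUNTIL form corresponds to Go code
shaped like the sketch below (illustrative only; the real lowering advances
hp with unsafe pointer arithmetic via addptr rather than re-indexing):

	package main

	import "fmt"

	// sum mimics the shape walkrange produces for "for v1, v2 := range ha"
	// with a wide element type: an if-guard, then a loop whose condition is
	// checked after the body and whose pointer advance (the "late
	// increment") only happens once another iteration is known to be in
	// range.
	func sum(ha []int64) int64 {
		var total int64
		hv1, hn := 0, len(ha)
		if hv1 < hn { // ifGuard
			hp := &ha[0]
			for {
				v1, v2 := hv1, *hp // v1, v2 = hv1, *hp
				_ = v1
				total += v2 // original loop body
				hv1++       // post statement
				if !(hv1 < hn) {
					break // condition re-checked after the body
				}
				hp = &ha[hv1] // late increment; really addptr(hp, elemsize)
			}
		}
		return total
	}

	func main() {
		fmt.Println(sum([]int64{1, 2, 3})) // 6
	}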
+
+// isMapClear checks if n is of the form:
+//
+// for k := range m {
+// delete(m, k)
+// }
+//
+// where == for keys of map m is reflexive.
+func isMapClear(n *Node) bool {
+ if Debug.N != 0 || instrumenting {
+ return false
+ }
+
+ if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
+ return false
+ }
+
+ k := n.List.First()
+ if k == nil || k.isBlank() {
+ return false
+ }
+
+ // Require k to be a new variable name.
+ if k.Name == nil || k.Name.Defn != n {
+ return false
+ }
+
+ if n.Nbody.Len() != 1 {
+ return false
+ }
+
+ stmt := n.Nbody.First() // only stmt in body
+ if stmt == nil || stmt.Op != ODELETE {
+ return false
+ }
+
+ m := n.Right
+ if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
+ return false
+ }
+
+ // Keys where equality is not reflexive can not be deleted from maps.
+ if !isreflexive(m.Type.Key()) {
+ return false
+ }
+
+ return true
+}
+
+// mapClear constructs a call to runtime.mapclear for the map m.
+func mapClear(m *Node) *Node {
+ t := m.Type
+
+ // instantiate mapclear(typ *type, hmap map[any]any)
+ fn := syslook("mapclear")
+ fn = substArgTypes(fn, t.Key(), t.Elem())
+ n := mkcall1(fn, nil, nil, typename(t), m)
+
+ n = typecheck(n, ctxStmt)
+ n = walkstmt(n)
+
+ return n
+}
+
+// Lower n into runtime·memclr if possible, for
+// fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkrange: "for v1, v2 = range a".
+func arrayClear(n, v1, v2, a *Node) bool {
+ if Debug.N != 0 || instrumenting {
+ return false
+ }
+
+ if v1 == nil || v2 != nil {
+ return false
+ }
+
+ if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
+ return false
+ }
+
+ stmt := n.Nbody.First() // only stmt in body
+ if stmt.Op != OAS || stmt.Left.Op != OINDEX {
+ return false
+ }
+
+ if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
+ return false
+ }
+
+ elemsize := n.Type.Elem().Width
+ if elemsize <= 0 || !isZero(stmt.Right) {
+ return false
+ }
+
+ // Convert to
+ // if len(a) != 0 {
+ // hp = &a[0]
+ // hn = len(a)*sizeof(elem(a))
+ // memclr{NoHeap,Has}Pointers(hp, hn)
+ // i = len(a) - 1
+ // }
+ n.Op = OIF
+
+ n.Nbody.Set(nil)
+ n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
+
+ // hp = &a[0]
+ hp := temp(types.Types[TUNSAFEPTR])
+
+ tmp := nod(OINDEX, a, nodintconst(0))
+ tmp.SetBounded(true)
+ tmp = nod(OADDR, tmp, nil)
+ tmp = convnop(tmp, types.Types[TUNSAFEPTR])
+ n.Nbody.Append(nod(OAS, hp, tmp))
+
+ // hn = len(a) * sizeof(elem(a))
+ hn := temp(types.Types[TUINTPTR])
+
+ tmp = nod(OLEN, a, nil)
+ tmp = nod(OMUL, tmp, nodintconst(elemsize))
+ tmp = conv(tmp, types.Types[TUINTPTR])
+ n.Nbody.Append(nod(OAS, hn, tmp))
+
+ var fn *Node
+ if a.Type.Elem().HasPointers() {
+ // memclrHasPointers(hp, hn)
+ Curfn.Func.setWBPos(stmt.Pos)
+ fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
+ } else {
+ // memclrNoHeapPointers(hp, hn)
+ fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
+ }
+
+ n.Nbody.Append(fn)
+
+ // i = len(a) - 1
+ v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
+
+ n.Nbody.Append(v1)
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ typecheckslice(n.Nbody.Slice(), ctxStmt)
+ n = walkstmt(n)
+ return true
+}
+
+// addptr returns (*T)(uintptr(p) + n).
+func addptr(p *Node, n int64) *Node {
+ t := p.Type
+
+ p = nod(OCONVNOP, p, nil)
+ p.Type = types.Types[TUINTPTR]
+
+ p = nod(OADD, p, nodintconst(n))
+
+ p = nod(OCONVNOP, p, nil)
+ p.Type = t
+
+ return p
+}
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
new file mode 100644
index 0000000..9401eba
--- /dev/null
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -0,0 +1,1901 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/gcprog"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+)
+
+type itabEntry struct {
+ t, itype *types.Type
+ lsym *obj.LSym // symbol of the itab itself
+
+ // symbols of each method in
+ // the itab, sorted by byte offset;
+ // filled in by peekitabs
+ entries []*obj.LSym
+}
+
+type ptabEntry struct {
+ s *types.Sym
+ t *types.Type
+}
+
+// runtime interface and reflection data structures
+var (
+ signatmu sync.Mutex // protects signatset and signatslice
+ signatset = make(map[*types.Type]struct{})
+ signatslice []*types.Type
+
+ itabs []itabEntry
+ ptabs []ptabEntry
+)
+
+type Sig struct {
+ name *types.Sym
+ isym *types.Sym
+ tsym *types.Sym
+ type_ *types.Type
+ mtype *types.Type
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with runtime/map.go.
+const (
+ BUCKETSIZE = 8
+ MAXKEYSIZE = 128
+ MAXELEMSIZE = 128
+)
+
+func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
+func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
+func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{})
+
+func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
+ if t.Sym == nil && len(methods(t)) == 0 {
+ return 0
+ }
+ return 4 + 2 + 2 + 4 + 4
+}
+
+func makefield(name string, t *types.Type) *types.Field {
+ f := types.NewField()
+ f.Type = t
+ f.Sym = (*types.Pkg)(nil).Lookup(name)
+ return f
+}
+
+// bmap makes the map bucket type given the type of the map.
+func bmap(t *types.Type) *types.Type {
+ if t.MapType().Bucket != nil {
+ return t.MapType().Bucket
+ }
+
+ bucket := types.New(TSTRUCT)
+ keytype := t.Key()
+ elemtype := t.Elem()
+ dowidth(keytype)
+ dowidth(elemtype)
+ if keytype.Width > MAXKEYSIZE {
+ keytype = types.NewPtr(keytype)
+ }
+ if elemtype.Width > MAXELEMSIZE {
+ elemtype = types.NewPtr(elemtype)
+ }
+
+ field := make([]*types.Field, 0, 5)
+
+ // The first field is: uint8 topbits[BUCKETSIZE].
+ arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
+ field = append(field, makefield("topbits", arr))
+
+ arr = types.NewArray(keytype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ keys := makefield("keys", arr)
+ field = append(field, keys)
+
+ arr = types.NewArray(elemtype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ elems := makefield("elems", arr)
+ field = append(field, elems)
+
+ // If keys and elems have no pointers, the map implementation
+ // can keep a list of overflow pointers on the side so that
+ // buckets can be marked as having no pointers.
+ // Arrange for the bucket to have no pointers by changing
+ // the type of the overflow field to uintptr in this case.
+ // See comment on hmap.overflow in runtime/map.go.
+ otyp := types.NewPtr(bucket)
+ if !elemtype.HasPointers() && !keytype.HasPointers() {
+ otyp = types.Types[TUINTPTR]
+ }
+ overflow := makefield("overflow", otyp)
+ field = append(field, overflow)
+
+ // link up fields
+ bucket.SetNoalg(true)
+ bucket.SetFields(field[:])
+ dowidth(bucket)
+
+ // Check invariants that map code depends on.
+ if !IsComparable(t.Key()) {
+ Fatalf("unsupported map key type for %v", t)
+ }
+ if BUCKETSIZE < 8 {
+ Fatalf("bucket size too small for proper alignment")
+ }
+ if keytype.Align > BUCKETSIZE {
+ Fatalf("key align too big for %v", t)
+ }
+ if elemtype.Align > BUCKETSIZE {
+ Fatalf("elem align too big for %v", t)
+ }
+ if keytype.Width > MAXKEYSIZE {
+		Fatalf("key size too large for %v", t)
+ }
+ if elemtype.Width > MAXELEMSIZE {
+		Fatalf("elem size too large for %v", t)
+ }
+ if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
+ Fatalf("key indirect incorrect for %v", t)
+ }
+ if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
+ Fatalf("elem indirect incorrect for %v", t)
+ }
+ if keytype.Width%int64(keytype.Align) != 0 {
+ Fatalf("key size not a multiple of key align for %v", t)
+ }
+ if elemtype.Width%int64(elemtype.Align) != 0 {
+ Fatalf("elem size not a multiple of elem align for %v", t)
+ }
+ if bucket.Align%keytype.Align != 0 {
+ Fatalf("bucket align not multiple of key align %v", t)
+ }
+ if bucket.Align%elemtype.Align != 0 {
+ Fatalf("bucket align not multiple of elem align %v", t)
+ }
+ if keys.Offset%int64(keytype.Align) != 0 {
+ Fatalf("bad alignment of keys in bmap for %v", t)
+ }
+ if elems.Offset%int64(elemtype.Align) != 0 {
+ Fatalf("bad alignment of elems in bmap for %v", t)
+ }
+
+ // Double-check that overflow field is final memory in struct,
+ // with no padding at end.
+ if overflow.Offset != bucket.Width-int64(Widthptr) {
+ Fatalf("bad offset of overflow in bmap for %v", t)
+ }
+
+ t.MapType().Bucket = bucket
+
+ bucket.StructType().Map = t
+ return bucket
+}
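
For concreteness, a hedged sketch (plain Go, not compiler output) of the
bucket layout this builds for map[string]int on a 64-bit target, where both
key and elem fit inline and the pointer-carrying keys force overflow to stay
a real pointer:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// bucket approximates the struct bmap constructs for map[string]int.
	type bucket struct {
		topbits  [8]uint8 // BUCKETSIZE tophash bytes
		keys     [8]string
		elems    [8]int
		overflow *bucket // would be uintptr if keys and elems held no pointers
	}

	func main() {
		fmt.Println(unsafe.Sizeof(bucket{})) // 208 on 64-bit: 8 + 8*16 + 8*8 + 8
	}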
+
+// hmap builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func hmap(t *types.Type) *types.Type {
+ if t.MapType().Hmap != nil {
+ return t.MapType().Hmap
+ }
+
+ bmap := bmap(t)
+
+ // build a struct:
+ // type hmap struct {
+ // count int
+ // flags uint8
+ // B uint8
+ // noverflow uint16
+ // hash0 uint32
+ // buckets *bmap
+ // oldbuckets *bmap
+ // nevacuate uintptr
+ // extra unsafe.Pointer // *mapextra
+ // }
+ // must match runtime/map.go:hmap.
+ fields := []*types.Field{
+ makefield("count", types.Types[TINT]),
+ makefield("flags", types.Types[TUINT8]),
+ makefield("B", types.Types[TUINT8]),
+ makefield("noverflow", types.Types[TUINT16]),
+ makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
+ makefield("oldbuckets", types.NewPtr(bmap)),
+ makefield("nevacuate", types.Types[TUINTPTR]),
+ makefield("extra", types.Types[TUNSAFEPTR]),
+ }
+
+ hmap := types.New(TSTRUCT)
+ hmap.SetNoalg(true)
+ hmap.SetFields(fields)
+ dowidth(hmap)
+
+ // The size of hmap should be 48 bytes on 64 bit
+ // and 28 bytes on 32 bit platforms.
+ if size := int64(8 + 5*Widthptr); hmap.Width != size {
+ Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
+ }
+
+ t.MapType().Hmap = hmap
+ hmap.StructType().Map = t
+ return hmap
+}
+
+// hiter builds a type representing an Hiter structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func hiter(t *types.Type) *types.Type {
+ if t.MapType().Hiter != nil {
+ return t.MapType().Hiter
+ }
+
+ hmap := hmap(t)
+ bmap := bmap(t)
+
+ // build a struct:
+ // type hiter struct {
+ // key *Key
+ // elem *Elem
+ // t unsafe.Pointer // *MapType
+ // h *hmap
+ // buckets *bmap
+ // bptr *bmap
+ // overflow unsafe.Pointer // *[]*bmap
+ // oldoverflow unsafe.Pointer // *[]*bmap
+ // startBucket uintptr
+ // offset uint8
+ // wrapped bool
+ // B uint8
+ // i uint8
+ // bucket uintptr
+ // checkBucket uintptr
+ // }
+ // must match runtime/map.go:hiter.
+ fields := []*types.Field{
+ makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
+ makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
+ makefield("t", types.Types[TUNSAFEPTR]),
+ makefield("h", types.NewPtr(hmap)),
+ makefield("buckets", types.NewPtr(bmap)),
+ makefield("bptr", types.NewPtr(bmap)),
+ makefield("overflow", types.Types[TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[TUNSAFEPTR]),
+ makefield("startBucket", types.Types[TUINTPTR]),
+ makefield("offset", types.Types[TUINT8]),
+ makefield("wrapped", types.Types[TBOOL]),
+ makefield("B", types.Types[TUINT8]),
+ makefield("i", types.Types[TUINT8]),
+ makefield("bucket", types.Types[TUINTPTR]),
+ makefield("checkBucket", types.Types[TUINTPTR]),
+ }
+
+ // build iterator struct holding the above fields
+ hiter := types.New(TSTRUCT)
+ hiter.SetNoalg(true)
+ hiter.SetFields(fields)
+ dowidth(hiter)
+ if hiter.Width != int64(12*Widthptr) {
+ Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
+ }
+ t.MapType().Hiter = hiter
+ hiter.StructType().Map = t
+ return hiter
+}
+
+// deferstruct makes a runtime._defer structure, with additional space for
+// stksize bytes of args.
+func deferstruct(stksize int64) *types.Type {
+ makefield := func(name string, typ *types.Type) *types.Field {
+ f := types.NewField()
+ f.Type = typ
+ // Unlike the global makefield function, this one needs to set Pkg
+ // because these types might be compared (in SSA CSE sorting).
+ // TODO: unify this makefield and the global one above.
+ f.Sym = &types.Sym{Name: name, Pkg: localpkg}
+ return f
+ }
+ argtype := types.NewArray(types.Types[TUINT8], stksize)
+ argtype.Width = stksize
+ argtype.Align = 1
+ // These fields must match the ones in runtime/runtime2.go:_defer and
+ // cmd/compile/internal/gc/ssa.go:(*state).call.
+ fields := []*types.Field{
+ makefield("siz", types.Types[TUINT32]),
+ makefield("started", types.Types[TBOOL]),
+ makefield("heap", types.Types[TBOOL]),
+ makefield("openDefer", types.Types[TBOOL]),
+ makefield("sp", types.Types[TUINTPTR]),
+ makefield("pc", types.Types[TUINTPTR]),
+ // Note: the types here don't really matter. Defer structures
+ // are always scanned explicitly during stack copying and GC,
+ // so we make them uintptr type even though they are real pointers.
+ makefield("fn", types.Types[TUINTPTR]),
+ makefield("_panic", types.Types[TUINTPTR]),
+ makefield("link", types.Types[TUINTPTR]),
+ makefield("framepc", types.Types[TUINTPTR]),
+ makefield("varp", types.Types[TUINTPTR]),
+ makefield("fd", types.Types[TUINTPTR]),
+ makefield("args", argtype),
+ }
+
+ // build struct holding the above fields
+ s := types.New(TSTRUCT)
+ s.SetNoalg(true)
+ s.SetFields(fields)
+ s.Width = widstruct(s, s, 0, 1)
+ s.Align = uint8(Widthptr)
+ return s
+}
+
+// methodfunc builds the function type for method type f, adding the
+// receiver as the first argument when receiver is non-nil.
+func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
+ inLen := f.Params().Fields().Len()
+ if receiver != nil {
+ inLen++
+ }
+ in := make([]*Node, 0, inLen)
+
+ if receiver != nil {
+ d := anonfield(receiver)
+ in = append(in, d)
+ }
+
+ for _, t := range f.Params().Fields().Slice() {
+ d := anonfield(t.Type)
+ d.SetIsDDD(t.IsDDD())
+ in = append(in, d)
+ }
+
+ outLen := f.Results().Fields().Len()
+ out := make([]*Node, 0, outLen)
+ for _, t := range f.Results().Fields().Slice() {
+ d := anonfield(t.Type)
+ out = append(out, d)
+ }
+
+ t := functype(nil, in, out)
+ if f.Nname() != nil {
+ // Link to name of original method function.
+ t.SetNname(f.Nname())
+ }
+
+ return t
+}
+
+// methods returns the methods of the non-interface type t, sorted by name.
+// Generates stub functions as needed.
+func methods(t *types.Type) []*Sig {
+ // method type
+ mt := methtype(t)
+
+ if mt == nil {
+ return nil
+ }
+ expandmeth(mt)
+
+ // type stored in interface word
+ it := t
+
+ if !isdirectiface(it) {
+ it = types.NewPtr(t)
+ }
+
+ // make list of methods for t,
+ // generating code if necessary.
+ var ms []*Sig
+ for _, f := range mt.AllMethods().Slice() {
+ if !f.IsMethod() {
+ Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
+ }
+ if f.Type.Recv() == nil {
+ Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
+ }
+ if f.Nointerface() {
+ continue
+ }
+
+ method := f.Sym
+ if method == nil {
+ break
+ }
+
+ // get receiver type for this particular method.
+ // if pointer receiver but non-pointer t and
+ // this is not an embedded pointer inside a struct,
+ // method does not apply.
+ if !isMethodApplicable(t, f) {
+ continue
+ }
+
+ sig := &Sig{
+ name: method,
+ isym: methodSym(it, method),
+ tsym: methodSym(t, method),
+ type_: methodfunc(f.Type, t),
+ mtype: methodfunc(f.Type, nil),
+ }
+ ms = append(ms, sig)
+
+ this := f.Type.Recv().Type
+
+ if !sig.isym.Siggen() {
+ sig.isym.SetSiggen(true)
+ if !types.Identical(this, it) {
+ genwrapper(it, f, sig.isym)
+ }
+ }
+
+ if !sig.tsym.Siggen() {
+ sig.tsym.SetSiggen(true)
+ if !types.Identical(this, t) {
+ genwrapper(t, f, sig.tsym)
+ }
+ }
+ }
+
+ return ms
+}
+
+// imethods returns the methods of the interface type t, sorted by name.
+func imethods(t *types.Type) []*Sig {
+ var methods []*Sig
+ for _, f := range t.Fields().Slice() {
+ if f.Type.Etype != TFUNC || f.Sym == nil {
+ continue
+ }
+ if f.Sym.IsBlank() {
+ Fatalf("unexpected blank symbol in interface method set")
+ }
+ if n := len(methods); n > 0 {
+ last := methods[n-1]
+ if !last.name.Less(f.Sym) {
+ Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+ }
+ }
+
+ sig := &Sig{
+ name: f.Sym,
+ mtype: f.Type,
+ type_: methodfunc(f.Type, nil),
+ }
+ methods = append(methods, sig)
+
+ // NOTE(rsc): Perhaps an oversight that
+ // IfaceType.Method is not in the reflect data.
+ // Generate the method body, so that compiled
+ // code can refer to it.
+ isym := methodSym(t, f.Sym)
+ if !isym.Siggen() {
+ isym.SetSiggen(true)
+ genwrapper(t, f, isym)
+ }
+ }
+
+ return methods
+}
+
+func dimportpath(p *types.Pkg) {
+ if p.Pathsym != nil {
+ return
+ }
+
+ // If we are compiling the runtime package, there are two runtime packages around
+ // -- localpkg and Runtimepkg. We don't want to produce import path symbols for
+ // both of them, so just produce one for localpkg.
+ if myimportpath == "runtime" && p == Runtimepkg {
+ return
+ }
+
+ str := p.Path
+ if p == localpkg {
+ // Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
+ str = myimportpath
+ }
+
+ s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
+ ot := dnameData(s, 0, str, "", nil, false)
+ ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ p.Pathsym = s
+}
+
+func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
+ if pkg == nil {
+ return duintptr(s, ot, 0)
+ }
+
+ if pkg == localpkg && myimportpath == "" {
+ // If we don't know the full import path of the package being compiled
+ // (i.e. -p was not passed on the compiler command line), emit a reference to
+ // type..importpath.""., which the linker will rewrite using the correct import path.
+ // Every package that imports this one directly defines the symbol.
+ // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+ ns := Ctxt.Lookup(`type..importpath."".`)
+ return dsymptr(s, ot, ns, 0)
+ }
+
+ dimportpath(pkg)
+ return dsymptr(s, ot, pkg.Pathsym, 0)
+}
+
+// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
+func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
+ if pkg == nil {
+ return duint32(s, ot, 0)
+ }
+ if pkg == localpkg && myimportpath == "" {
+ // If we don't know the full import path of the package being compiled
+ // (i.e. -p was not passed on the compiler command line), emit a reference to
+ // type..importpath.""., which the linker will rewrite using the correct import path.
+ // Every package that imports this one directly defines the symbol.
+ // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+ ns := Ctxt.Lookup(`type..importpath."".`)
+ return dsymptrOff(s, ot, ns)
+ }
+
+ dimportpath(pkg)
+ return dsymptrOff(s, ot, pkg.Pathsym)
+}
+
+// dnameField dumps a reflect.name for a struct field.
+func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
+ if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+ Fatalf("package mismatch for %v", ft.Sym)
+ }
+ nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
+ return dsymptr(lsym, ot, nsym, 0)
+}
+
+// dnameData writes the contents of a reflect.name into s at offset ot.
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
+ if len(name) > 1<<16-1 {
+ Fatalf("name too long: %s", name)
+ }
+ if len(tag) > 1<<16-1 {
+ Fatalf("tag too long: %s", tag)
+ }
+
+ // Encode name and tag. See reflect/type.go for details.
+ var bits byte
+ l := 1 + 2 + len(name)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += 2 + len(tag)
+ bits |= 1 << 1
+ }
+ if pkg != nil {
+ bits |= 1 << 2
+ }
+ b := make([]byte, l)
+ b[0] = bits
+ b[1] = uint8(len(name) >> 8)
+ b[2] = uint8(len(name))
+ copy(b[3:], name)
+ if len(tag) > 0 {
+ tb := b[3+len(name):]
+ tb[0] = uint8(len(tag) >> 8)
+ tb[1] = uint8(len(tag))
+ copy(tb[2:], tag)
+ }
+
+ ot = int(s.WriteBytes(Ctxt, int64(ot), b))
+
+ if pkg != nil {
+ ot = dgopkgpathOff(s, ot, pkg)
+ }
+
+ return ot
+}
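
To make the encoding concrete, here is a small decoder sketch (illustrative
only, not the runtime's reader) that reverses exactly the bytes laid down
above: a flags byte, a 2-byte big-endian name length, the name, then an
optional 2-byte big-endian tag length and tag:

	package main

	import "fmt"

	func decodeName(b []byte) (name, tag string, exported bool) {
		bits := b[0]
		exported = bits&(1<<0) != 0
		nlen := int(b[1])<<8 | int(b[2])
		name = string(b[3 : 3+nlen])
		if bits&(1<<1) != 0 {
			tb := b[3+nlen:]
			tlen := int(tb[0])<<8 | int(tb[1])
			tag = string(tb[2 : 2+tlen])
		}
		return
	}

	func main() {
		// The exported name "X" with tag `json:"x"` and no pkg path.
		raw := []byte{0b011, 0, 1, 'X', 0, 8, 'j', 's', 'o', 'n', ':', '"', 'x', '"'}
		fmt.Println(decodeName(raw)) // X json:"x" true
	}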
+
+var dnameCount int
+
+// dname creates a reflect.name for a struct field or method.
+func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
+ // Write out data as "type.." to signal two things to the
+ // linker, first that when dynamically linking, the symbol
+ // should be moved to a relro section, and second that the
+ // contents should not be decoded as a type.
+ sname := "type..namedata."
+ if pkg == nil {
+ // In the common case, share data with other packages.
+ if name == "" {
+ if exported {
+ sname += "-noname-exported." + tag
+ } else {
+ sname += "-noname-unexported." + tag
+ }
+ } else {
+ if exported {
+ sname += name + "." + tag
+ } else {
+ sname += name + "-" + tag
+ }
+ }
+ } else {
+ sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
+ dnameCount++
+ }
+ s := Ctxt.Lookup(sname)
+ if len(s.P) > 0 {
+ return s
+ }
+ ot := dnameData(s, 0, name, tag, pkg, exported)
+ ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ return s
+}
+
+// dextratype dumps the fields of a runtime.uncommontype.
+// dataAdd is the offset in bytes after the header where the
+// backing array of the []method field is written (by dextratypeData).
+func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
+ m := methods(t)
+ if t.Sym == nil && len(m) == 0 {
+ return ot
+ }
+ noff := int(Rnd(int64(ot), int64(Widthptr)))
+ if noff != ot {
+ Fatalf("unexpected alignment in dextratype for %v", t)
+ }
+
+ for _, a := range m {
+ dtypesym(a.type_)
+ }
+
+ ot = dgopkgpathOff(lsym, ot, typePkg(t))
+
+ dataAdd += uncommonSize(t)
+ mcount := len(m)
+ if mcount != int(uint16(mcount)) {
+ Fatalf("too many methods on %v: %d", t, mcount)
+ }
+ xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
+ if dataAdd != int(uint32(dataAdd)) {
+ Fatalf("methods are too far away on %v: %d", t, dataAdd)
+ }
+
+ ot = duint16(lsym, ot, uint16(mcount))
+ ot = duint16(lsym, ot, uint16(xcount))
+ ot = duint32(lsym, ot, uint32(dataAdd))
+ ot = duint32(lsym, ot, 0)
+ return ot
+}
+
+func typePkg(t *types.Type) *types.Pkg {
+ tsym := t.Sym
+ if tsym == nil {
+ switch t.Etype {
+ case TARRAY, TSLICE, TPTR, TCHAN:
+ if t.Elem() != nil {
+ tsym = t.Elem().Sym
+ }
+ }
+ }
+ if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
+ return tsym.Pkg
+ }
+ return nil
+}
+
+// dextratypeData dumps the backing array for the []method field of
+// runtime.uncommontype.
+func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
+ for _, a := range methods(t) {
+ // ../../../../runtime/type.go:/method
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != typePkg(t) {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported)
+
+ ot = dsymptrOff(lsym, ot, nsym)
+ ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
+ ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
+ ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
+ }
+ return ot
+}
+
+func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
+ duint32(s, ot, 0)
+ r := obj.Addrel(s)
+ r.Off = int32(ot)
+ r.Siz = 4
+ r.Sym = x
+ r.Type = objabi.R_METHODOFF
+ return ot + 4
+}
+
+var kinds = []int{
+ TINT: objabi.KindInt,
+ TUINT: objabi.KindUint,
+ TINT8: objabi.KindInt8,
+ TUINT8: objabi.KindUint8,
+ TINT16: objabi.KindInt16,
+ TUINT16: objabi.KindUint16,
+ TINT32: objabi.KindInt32,
+ TUINT32: objabi.KindUint32,
+ TINT64: objabi.KindInt64,
+ TUINT64: objabi.KindUint64,
+ TUINTPTR: objabi.KindUintptr,
+ TFLOAT32: objabi.KindFloat32,
+ TFLOAT64: objabi.KindFloat64,
+ TBOOL: objabi.KindBool,
+ TSTRING: objabi.KindString,
+ TPTR: objabi.KindPtr,
+ TSTRUCT: objabi.KindStruct,
+ TINTER: objabi.KindInterface,
+ TCHAN: objabi.KindChan,
+ TMAP: objabi.KindMap,
+ TARRAY: objabi.KindArray,
+ TSLICE: objabi.KindSlice,
+ TFUNC: objabi.KindFunc,
+ TCOMPLEX64: objabi.KindComplex64,
+ TCOMPLEX128: objabi.KindComplex128,
+ TUNSAFEPTR: objabi.KindUnsafePointer,
+}
+
+// typeptrdata returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+func typeptrdata(t *types.Type) int64 {
+ if !t.HasPointers() {
+ return 0
+ }
+
+ switch t.Etype {
+ case TPTR,
+ TUNSAFEPTR,
+ TFUNC,
+ TCHAN,
+ TMAP:
+ return int64(Widthptr)
+
+ case TSTRING:
+ // struct { byte *str; intgo len; }
+ return int64(Widthptr)
+
+ case TINTER:
+ // struct { Itab *tab; void *data; } or
+ // struct { Type *type; void *data; }
+ // Note: see comment in plive.go:onebitwalktype1.
+ return 2 * int64(Widthptr)
+
+ case TSLICE:
+ // struct { byte *array; uintgo len; uintgo cap; }
+ return int64(Widthptr)
+
+ case TARRAY:
+ // haspointers already eliminated t.NumElem() == 0.
+ return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
+
+ case TSTRUCT:
+ // Find the last field that has pointers.
+ var lastPtrField *types.Field
+ for _, t1 := range t.Fields().Slice() {
+ if t1.Type.HasPointers() {
+ lastPtrField = t1
+ }
+ }
+ return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
+
+ default:
+ Fatalf("typeptrdata: unexpected type, %v", t)
+ return 0
+ }
+}
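
A quick worked example of the struct case (illustrative only, assuming a
64-bit target where Widthptr is 8):

	type T struct {
		x *int  // offset 0, has pointers
		y int64 // offset 8, scalar
		z *byte // offset 16, last field with pointers
	}

	// typeptrdata(T) = z.Offset + typeptrdata(*byte) = 16 + 8 = 24,
	// so only the first 24 bytes of T need pointer bits in the GC data.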
+
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+// cmd/compile/internal/gc/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// reflect/type.go
+// runtime/type.go
+const (
+ tflagUncommon = 1 << 0
+ tflagExtraStar = 1 << 1
+ tflagNamed = 1 << 2
+ tflagRegularMemory = 1 << 3
+)
+
+var (
+ memhashvarlen *obj.LSym
+ memequalvarlen *obj.LSym
+)
+
+// dcommontype dumps the contents of a reflect.rtype (runtime._type).
+func dcommontype(lsym *obj.LSym, t *types.Type) int {
+ dowidth(t)
+ eqfunc := geneq(t)
+
+ sptrWeak := true
+ var sptr *obj.LSym
+ if !t.IsPtr() || t.IsPtrElem() {
+ tptr := types.NewPtr(t)
+ if t.Sym != nil || methods(tptr) != nil {
+ sptrWeak = false
+ }
+ sptr = dtypesym(tptr)
+ }
+
+ gcsym, useGCProg, ptrdata := dgcsym(t)
+
+ // ../../../../reflect/type.go:/^type.rtype
+ // actual type structure
+ // type rtype struct {
+ // size uintptr
+ // ptrdata uintptr
+ // hash uint32
+ // tflag tflag
+ // align uint8
+ // fieldAlign uint8
+ // kind uint8
+ // equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata *byte
+ // str nameOff
+ // ptrToThis typeOff
+ // }
+ ot := 0
+ ot = duintptr(lsym, ot, uint64(t.Width))
+ ot = duintptr(lsym, ot, uint64(ptrdata))
+ ot = duint32(lsym, ot, typehash(t))
+
+ var tflag uint8
+ if uncommonSize(t) != 0 {
+ tflag |= tflagUncommon
+ }
+ if t.Sym != nil && t.Sym.Name != "" {
+ tflag |= tflagNamed
+ }
+ if IsRegularMemory(t) {
+ tflag |= tflagRegularMemory
+ }
+
+ exported := false
+ p := t.LongString()
+ // If we're writing out type T,
+ // we are very likely to write out type *T as well.
+ // Use the string "*T"[1:] for "T", so that the two
+ // share storage. This is a cheap way to reduce the
+ // amount of space taken up by reflect strings.
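+ // For example (an editorial illustration): for a named type T we emit
+ // the single name "*T"; the descriptor for T reuses that storage with
+ // tflagExtraStar set, and reflect strips the leading '*' when it
+ // reports T's name.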
+ if !strings.HasPrefix(p, "*") {
+ p = "*" + p
+ tflag |= tflagExtraStar
+ if t.Sym != nil {
+ exported = types.IsExported(t.Sym.Name)
+ }
+ } else {
+ if t.Elem() != nil && t.Elem().Sym != nil {
+ exported = types.IsExported(t.Elem().Sym.Name)
+ }
+ }
+
+ ot = duint8(lsym, ot, tflag)
+
+ // runtime (and common sense) expects alignment to be a power of two.
+ i := int(t.Align)
+
+ if i == 0 {
+ i = 1
+ }
+ if i&(i-1) != 0 {
+ Fatalf("invalid alignment %d for %v", t.Align, t)
+ }
+ ot = duint8(lsym, ot, t.Align) // align
+ ot = duint8(lsym, ot, t.Align) // fieldAlign
+
+ i = kinds[t.Etype]
+ if isdirectiface(t) {
+ i |= objabi.KindDirectIface
+ }
+ if useGCProg {
+ i |= objabi.KindGCProg
+ }
+ ot = duint8(lsym, ot, uint8(i)) // kind
+ if eqfunc != nil {
+ ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
+ } else {
+ ot = duintptr(lsym, ot, 0) // type we can't do == with
+ }
+ ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
+
+ nsym := dname(p, "", nil, exported)
+ ot = dsymptrOff(lsym, ot, nsym) // str
+ // ptrToThis
+ if sptr == nil {
+ ot = duint32(lsym, ot, 0)
+ } else if sptrWeak {
+ ot = dsymptrWeakOff(lsym, ot, sptr)
+ } else {
+ ot = dsymptrOff(lsym, ot, sptr)
+ }
+
+ return ot
+}
+
+// typeHasNoAlg reports whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func typeHasNoAlg(t *types.Type) bool {
+ a, bad := algtype1(t)
+ return a == ANOEQ && bad.Noalg()
+}
+
+func typesymname(t *types.Type) string {
+ name := t.ShortString()
+ // Use a separate symbol name for Noalg types for #17752.
+ if typeHasNoAlg(t) {
+ name = "noalg." + name
+ }
+ return name
+}
+
+// Fake package for runtime type info (headers)
+// Don't access directly, use typeLookup below.
+var (
+ typepkgmu sync.Mutex // protects typepkg lookups
+ typepkg = types.NewPkg("type", "type")
+)
+
+func typeLookup(name string) *types.Sym {
+ typepkgmu.Lock()
+ s := typepkg.Lookup(name)
+ typepkgmu.Unlock()
+ return s
+}
+
+func typesym(t *types.Type) *types.Sym {
+ return typeLookup(typesymname(t))
+}
+
+// tracksym returns the symbol for tracking use of field/method f, assumed
+// to be a member of struct/interface type t.
+func tracksym(t *types.Type, f *types.Field) *types.Sym {
+ return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
+}
+
+func typesymprefix(prefix string, t *types.Type) *types.Sym {
+ p := prefix + "." + t.ShortString()
+ s := typeLookup(p)
+
+ // This function is for looking up type-related generated functions
+ // (e.g. eq and hash). Make sure they are indeed generated.
+ signatmu.Lock()
+ addsignat(t)
+ signatmu.Unlock()
+
+ //print("algsym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func typenamesym(t *types.Type) *types.Sym {
+ if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
+ Fatalf("typenamesym %v", t)
+ }
+ s := typesym(t)
+ signatmu.Lock()
+ addsignat(t)
+ signatmu.Unlock()
+ return s
+}
+
+func typename(t *types.Type) *Node {
+ s := typenamesym(t)
+ if s.Def == nil {
+ n := newnamel(src.NoXPos, s)
+ n.Type = types.Types[TUINT8]
+ n.SetClass(PEXTERN)
+ n.SetTypecheck(1)
+ s.Def = asTypesNode(n)
+ }
+
+ n := nod(OADDR, asNode(s.Def), nil)
+ n.Type = types.NewPtr(asNode(s.Def).Type)
+ n.SetTypecheck(1)
+ return n
+}
+
+func itabname(t, itype *types.Type) *Node {
+ if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
+ Fatalf("itabname(%v, %v)", t, itype)
+ }
+ s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
+ if s.Def == nil {
+ n := newname(s)
+ n.Type = types.Types[TUINT8]
+ n.SetClass(PEXTERN)
+ n.SetTypecheck(1)
+ s.Def = asTypesNode(n)
+ itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
+ }
+
+ n := nod(OADDR, asNode(s.Def), nil)
+ n.Type = types.NewPtr(asNode(s.Def).Type)
+ n.SetTypecheck(1)
+ return n
+}
+
+// isreflexive reports whether t has a reflexive equality operator.
+// That is, whether x == x is true for all x of type t.
+func isreflexive(t *types.Type) bool {
+ switch t.Etype {
+ case TBOOL,
+ TINT,
+ TUINT,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TUINTPTR,
+ TPTR,
+ TUNSAFEPTR,
+ TSTRING,
+ TCHAN:
+ return true
+
+ case TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128,
+ TINTER:
+ return false
+
+ case TARRAY:
+ return isreflexive(t.Elem())
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if !isreflexive(t1.Type) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ Fatalf("bad type for map key: %v", t)
+ return false
+ }
+}
+
+// needkeyupdate reports whether map updates with t as a key
+// need the key to be updated.
+func needkeyupdate(t *types.Type) bool {
+ switch t.Etype {
+ case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
+ TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
+ return false
+
+ case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
+ TINTER,
+ TSTRING: // strings might have smaller backing stores
+ return true
+
+ case TARRAY:
+ return needkeyupdate(t.Elem())
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if needkeyupdate(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ Fatalf("bad type for map key: %v", t)
+ return true
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *types.Type) bool {
+ switch t.Etype {
+ case TINTER:
+ return true
+
+ case TARRAY:
+ return hashMightPanic(t.Elem())
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if hashMightPanic(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ return false
+ }
+}
+
+// formalType replaces byte and rune aliases with real types.
+// They've been separate internally to make error messages
+// better, but we have to merge them in the reflect tables.
+func formalType(t *types.Type) *types.Type {
+ if t == types.Bytetype || t == types.Runetype {
+ return types.Types[t.Etype]
+ }
+ return t
+}
+
+func dtypesym(t *types.Type) *obj.LSym {
+ t = formalType(t)
+ if t.IsUntyped() {
+ Fatalf("dtypesym %v", t)
+ }
+
+ s := typesym(t)
+ lsym := s.Linksym()
+ if s.Siggen() {
+ return lsym
+ }
+ s.SetSiggen(true)
+
+ // special case (look for runtime below):
+ // when compiling package runtime,
+ // emit the type structures for int, float, etc.
+ tbase := t
+
+ if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
+ tbase = t.Elem()
+ }
+ dupok := 0
+ if tbase.Sym == nil {
+ dupok = obj.DUPOK
+ }
+
+ if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
+ // named types from other files are defined only by those files
+ if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
+ if i, ok := typeSymIdx[tbase]; ok {
+ lsym.Pkg = tbase.Sym.Pkg.Prefix
+ if t != tbase {
+ lsym.SymIdx = int32(i[1])
+ } else {
+ lsym.SymIdx = int32(i[0])
+ }
+ lsym.Set(obj.AttrIndexed, true)
+ }
+ return lsym
+ }
+ // TODO(mdempsky): Investigate whether this can happen.
+ if tbase.Etype == TFORW {
+ return lsym
+ }
+ }
+
+ ot := 0
+ switch t.Etype {
+ default:
+ ot = dcommontype(lsym, t)
+ ot = dextratype(lsym, ot, t, 0)
+
+ case TARRAY:
+ // ../../../../runtime/type.go:/arrayType
+ s1 := dtypesym(t.Elem())
+ t2 := types.NewSlice(t.Elem())
+ s2 := dtypesym(t2)
+ ot = dcommontype(lsym, t)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dsymptr(lsym, ot, s2, 0)
+ ot = duintptr(lsym, ot, uint64(t.NumElem()))
+ ot = dextratype(lsym, ot, t, 0)
+
+ case TSLICE:
+ // ../../../../runtime/type.go:/sliceType
+ s1 := dtypesym(t.Elem())
+ ot = dcommontype(lsym, t)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dextratype(lsym, ot, t, 0)
+
+ case TCHAN:
+ // ../../../../runtime/type.go:/chanType
+ s1 := dtypesym(t.Elem())
+ ot = dcommontype(lsym, t)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = duintptr(lsym, ot, uint64(t.ChanDir()))
+ ot = dextratype(lsym, ot, t, 0)
+
+ case TFUNC:
+ for _, t1 := range t.Recvs().Fields().Slice() {
+ dtypesym(t1.Type)
+ }
+ isddd := false
+ for _, t1 := range t.Params().Fields().Slice() {
+ isddd = t1.IsDDD()
+ dtypesym(t1.Type)
+ }
+ for _, t1 := range t.Results().Fields().Slice() {
+ dtypesym(t1.Type)
+ }
+
+ ot = dcommontype(lsym, t)
+ inCount := t.NumRecvs() + t.NumParams()
+ outCount := t.NumResults()
+ if isddd {
+ outCount |= 1 << 15
+ }
+ ot = duint16(lsym, ot, uint16(inCount))
+ ot = duint16(lsym, ot, uint16(outCount))
+ if Widthptr == 8 {
+ ot += 4 // align for *rtype
+ }
+
+ dataAdd := (inCount + t.NumResults()) * Widthptr
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ // Array of rtype pointers follows funcType.
+ for _, t1 := range t.Recvs().Fields().Slice() {
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ }
+ for _, t1 := range t.Params().Fields().Slice() {
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ }
+ for _, t1 := range t.Results().Fields().Slice() {
+ ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
+ }
+
+ case TINTER:
+ m := imethods(t)
+ n := len(m)
+ for _, a := range m {
+ dtypesym(a.type_)
+ }
+
+ // ../../../../runtime/type.go:/interfaceType
+ ot = dcommontype(lsym, t)
+
+ var tpkg *types.Pkg
+ if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
+ tpkg = t.Sym.Pkg
+ }
+ ot = dgopkgpath(lsym, ot, tpkg)
+
+ ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
+ ot = duintptr(lsym, ot, uint64(n))
+ ot = duintptr(lsym, ot, uint64(n))
+ dataAdd := imethodSize() * n
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ for _, a := range m {
+ // ../../../../runtime/type.go:/imethod
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != tpkg {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported)
+
+ ot = dsymptrOff(lsym, ot, nsym)
+ ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
+ }
+
+ // ../../../../runtime/type.go:/mapType
+ case TMAP:
+ s1 := dtypesym(t.Key())
+ s2 := dtypesym(t.Elem())
+ s3 := dtypesym(bmap(t))
+ hasher := genhash(t.Key())
+
+ ot = dcommontype(lsym, t)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dsymptr(lsym, ot, s2, 0)
+ ot = dsymptr(lsym, ot, s3, 0)
+ ot = dsymptr(lsym, ot, hasher, 0)
+ var flags uint32
+ // Note: flags must match maptype accessors in ../../../../runtime/type.go
+ // and maptype builder in ../../../../reflect/type.go:MapOf.
+ if t.Key().Width > MAXKEYSIZE {
+ ot = duint8(lsym, ot, uint8(Widthptr))
+ flags |= 1 // indirect key
+ } else {
+ ot = duint8(lsym, ot, uint8(t.Key().Width))
+ }
+
+ if t.Elem().Width > MAXELEMSIZE {
+ ot = duint8(lsym, ot, uint8(Widthptr))
+ flags |= 2 // indirect value
+ } else {
+ ot = duint8(lsym, ot, uint8(t.Elem().Width))
+ }
+ ot = duint16(lsym, ot, uint16(bmap(t).Width))
+ if isreflexive(t.Key()) {
+ flags |= 4 // reflexive key
+ }
+ if needkeyupdate(t.Key()) {
+ flags |= 8 // need key update
+ }
+ if hashMightPanic(t.Key()) {
+ flags |= 16 // hash might panic
+ }
+ ot = duint32(lsym, ot, flags)
+ ot = dextratype(lsym, ot, t, 0)
+
+ case TPTR:
+ if t.Elem().Etype == TANY {
+ // ../../../../runtime/type.go:/UnsafePointerType
+ ot = dcommontype(lsym, t)
+ ot = dextratype(lsym, ot, t, 0)
+
+ break
+ }
+
+ // ../../../../runtime/type.go:/ptrType
+ s1 := dtypesym(t.Elem())
+
+ ot = dcommontype(lsym, t)
+ ot = dsymptr(lsym, ot, s1, 0)
+ ot = dextratype(lsym, ot, t, 0)
+
+ // ../../../../runtime/type.go:/structType
+ // for security, only the exported fields.
+ case TSTRUCT:
+ fields := t.Fields().Slice()
+ for _, t1 := range fields {
+ dtypesym(t1.Type)
+ }
+
+ // All non-exported struct field names within a struct
+ // type must originate from a single package. By
+ // identifying and recording that package within the
+ // struct type descriptor, we can omit that
+ // information from the field descriptors.
+ var spkg *types.Pkg
+ for _, f := range fields {
+ if !types.IsExported(f.Sym.Name) {
+ spkg = f.Sym.Pkg
+ break
+ }
+ }
+
+ ot = dcommontype(lsym, t)
+ ot = dgopkgpath(lsym, ot, spkg)
+ ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
+ ot = duintptr(lsym, ot, uint64(len(fields)))
+ ot = duintptr(lsym, ot, uint64(len(fields)))
+
+ dataAdd := len(fields) * structfieldSize()
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ for _, f := range fields {
+ // ../../../../runtime/type.go:/structField
+ ot = dnameField(lsym, ot, spkg, f)
+ ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
+ offsetAnon := uint64(f.Offset) << 1
+ if offsetAnon>>1 != uint64(f.Offset) {
+ Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
+ }
+ if f.Embedded != 0 {
+ offsetAnon |= 1
+ }
+ ot = duintptr(lsym, ot, offsetAnon)
+ }
+ }
+
+ ot = dextratypeData(lsym, ot, t)
+ ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))
+
+ // The linker will leave a table of all the typelinks for
+ // types in the binary, so the runtime can find them.
+ //
+ // When buildmode=shared, all types are in typelinks so the
+ // runtime can deduplicate type pointers.
+ keep := Ctxt.Flag_dynlink
+ if !keep && t.Sym == nil {
+ // For an unnamed type, we only need the link if the type can
+ // be created at run time by reflect.PtrTo and similar
+ // functions. If the type exists in the program, those
+ // functions must return the existing type structure rather
+ // than creating a new one.
+ switch t.Etype {
+ case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
+ keep = true
+ }
+ }
+ // Do not put Noalg types in typelinks. See issue #22605.
+ if typeHasNoAlg(t) {
+ keep = false
+ }
+ lsym.Set(obj.AttrMakeTypelink, keep)
+
+ return lsym
+}
+
+// ifaceMethodOffset returns the offset of the i-th method in the interface
+// type descriptor, ityp.
+func ifaceMethodOffset(ityp *types.Type, i int64) int64 {
+ // interface type descriptor layout is struct {
+ // _type // commonSize
+ // pkgpath // 1 word
+ // []imethod // 3 words (pointing to [...]imethod below)
+ // uncommontype // uncommonSize
+ // [...]imethod
+ // }
+ // The size of imethod is 8.
+ return int64(commonSize()+4*Widthptr+uncommonSize(ityp)) + i*8
+}
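+
+// For example (an editorial sketch, assuming a 64-bit target and an
+// interface type with no uncommon data): commonSize() is 48 and 4*Widthptr
+// is 32, so the first imethod starts at byte offset 80 and method i starts
+// at 80 + i*8.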
+
+// peekitabs gathers, for each itabEntry, the methods on the
+// concrete type that implement the interface.
+func peekitabs() {
+ for i := range itabs {
+ tab := &itabs[i]
+ methods := genfun(tab.t, tab.itype)
+ if len(methods) == 0 {
+ continue
+ }
+ tab.entries = methods
+ }
+}
+
+// genfun returns, for the given concrete type and interface type,
+// the (sorted) set of methods on the concrete type that
+// implement the interface.
+func genfun(t, it *types.Type) []*obj.LSym {
+ if t == nil || it == nil {
+ return nil
+ }
+ sigs := imethods(it)
+ methods := methods(t)
+ out := make([]*obj.LSym, 0, len(sigs))
+ // TODO(mdempsky): Short circuit before calling methods(t)?
+ // See discussion on CL 105039.
+ if len(sigs) == 0 {
+ return nil
+ }
+
+ // both sigs and methods are sorted by name,
+ // so we can find the intersect in a single pass
+ for _, m := range methods {
+ if m.name == sigs[0].name {
+ out = append(out, m.isym.Linksym())
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
+ }
+ }
+
+ if len(sigs) != 0 {
+ Fatalf("incomplete itab")
+ }
+
+ return out
+}
+
+// itabsym uses the information gathered in
+// peekitabs to de-virtualize interface methods.
+// Since this is called by the SSA backend, it shouldn't
+// generate additional Nodes, Syms, etc.
+func itabsym(it *obj.LSym, offset int64) *obj.LSym {
+ var syms []*obj.LSym
+ if it == nil {
+ return nil
+ }
+
+ for i := range itabs {
+ e := &itabs[i]
+ if e.lsym == it {
+ syms = e.entries
+ break
+ }
+ }
+ if syms == nil {
+ return nil
+ }
+
+ // keep this arithmetic in sync with *itab layout
+ methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
+ if methodnum >= len(syms) {
+ return nil
+ }
+ return syms[methodnum]
+}
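+
+// For example (an editorial sketch, assuming a 64-bit target): the fun
+// array starts at offset 2*8 + 8 = 24 within the itab, so offset 24 maps
+// to methodnum 0 and offset 32 maps to methodnum 1.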
+
+// addsignat ensures that a runtime type descriptor is emitted for t.
+func addsignat(t *types.Type) {
+ if _, ok := signatset[t]; !ok {
+ signatset[t] = struct{}{}
+ signatslice = append(signatslice, t)
+ }
+}
+
+func addsignats(dcls []*Node) {
+ // copy types from dcl list to signatset
+ for _, n := range dcls {
+ if n.Op == OTYPE {
+ addsignat(n.Type)
+ }
+ }
+}
+
+func dumpsignats() {
+ // Process signatset. Use a loop, as dtypesym adds
+ // entries to signatset while it is being processed.
+ signats := make([]typeAndStr, len(signatslice))
+ for len(signatslice) > 0 {
+ signats = signats[:0]
+ // Transfer entries to a slice and sort, for reproducible builds.
+ for _, t := range signatslice {
+ signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
+ delete(signatset, t)
+ }
+ signatslice = signatslice[:0]
+ sort.Sort(typesByString(signats))
+ for _, ts := range signats {
+ t := ts.t
+ dtypesym(t)
+ if t.Sym != nil {
+ dtypesym(types.NewPtr(t))
+ }
+ }
+ }
+}
+
+func dumptabs() {
+ // process itabs
+ for _, i := range itabs {
+ // dump itab symbol into i.lsym
+ // type itab struct {
+ // inter *interfacetype
+ // _type *_type
+ // hash uint32
+ // _ [4]byte
+ // fun [1]uintptr // variable sized
+ // }
+ o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
+ o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
+ o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
+ o += 4 // skip unused field
+ for _, fn := range genfun(i.t, i.itype) {
+ o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ i.lsym.Set(obj.AttrContentAddressable, true)
+ }
+
+ // process ptabs
+ if localpkg.Name == "main" && len(ptabs) > 0 {
+ ot := 0
+ s := Ctxt.Lookup("go.plugin.tabs")
+ for _, p := range ptabs {
+ // Dump ptab symbol into go.pluginsym package.
+ //
+ // type ptab struct {
+ // name nameOff
+ // typ typeOff // pointer to symbol
+ // }
+ nsym := dname(p.s.Name, "", nil, true)
+ tsym := dtypesym(p.t)
+ ot = dsymptrOff(s, ot, nsym)
+ ot = dsymptrOff(s, ot, tsym)
+ // Plugin exports symbols as interfaces. Mark their types
+ // as UsedInIface.
+ tsym.Set(obj.AttrUsedInIface, true)
+ }
+ ggloblsym(s, int32(ot), int16(obj.RODATA))
+
+ ot = 0
+ s = Ctxt.Lookup("go.plugin.exports")
+ for _, p := range ptabs {
+ ot = dsymptr(s, ot, p.s.Linksym(), 0)
+ }
+ ggloblsym(s, int32(ot), int16(obj.RODATA))
+ }
+}
+
+func dumpimportstrings() {
+ // generate import strings for imported packages
+ for _, p := range types.ImportedPkgList() {
+ dimportpath(p)
+ }
+}
+
+func dumpbasictypes() {
+ // do basic types if compiling package runtime.
+ // they have to be in at least one package,
+ // and runtime is always loaded implicitly,
+ // so this is as good as any.
+ // another possible choice would be package main,
+ // but using runtime means fewer copies in object files.
+ if myimportpath == "runtime" {
+ for i := types.EType(1); i <= TBOOL; i++ {
+ dtypesym(types.NewPtr(types.Types[i]))
+ }
+ dtypesym(types.NewPtr(types.Types[TSTRING]))
+ dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
+
+ // emit type structs for error and func(error) string.
+ // The latter is the type of an auto-generated wrapper.
+ dtypesym(types.NewPtr(types.Errortype))
+
+ dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
+
+ // add paths for runtime and main, which the linker imports implicitly.
+ dimportpath(Runtimepkg)
+
+ if flag_race {
+ dimportpath(racepkg)
+ }
+ if flag_msan {
+ dimportpath(msanpkg)
+ }
+ dimportpath(types.NewPkg("main", ""))
+ }
+}
+
+type typeAndStr struct {
+ t *types.Type
+ short string
+ regular string
+}
+
+type typesByString []typeAndStr
+
+func (a typesByString) Len() int { return len(a) }
+func (a typesByString) Less(i, j int) bool {
+ if a[i].short != a[j].short {
+ return a[i].short < a[j].short
+ }
+ // When the only difference between the types is whether
+ // they refer to byte or uint8, such as **byte vs **uint8,
+ // the types' ShortStrings can be identical.
+ // To preserve deterministic sort ordering, sort these by String().
+ if a[i].regular != a[j].regular {
+ return a[i].regular < a[j].regular
+ }
+ // Identical anonymous interfaces defined in different locations
+ // will be equal for the above checks, but different in DWARF output.
+ // Sort by source position to ensure deterministic order.
+ // See issues 27013 and 30202.
+ if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 {
+ return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
+ }
+ return false
+}
+func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
+// which holds 1-bit entries describing where pointers are in a given type.
+// Above this length, the GC information is recorded as a GC program,
+// which can express repetition compactly. In either form, the
+// information is used by the runtime to initialize the heap bitmap,
+// and for large types (like 128 or more words), they are roughly the
+// same speed. GC programs are never much larger and often more
+// compact. (If large arrays are involved, they can be arbitrarily
+// more compact.)
+//
+// The cutoff must be large enough that any allocation large enough to
+// use a GC program is large enough that it does not share heap bitmap
+// bytes with any other objects, allowing the GC program execution to
+// assume an aligned start and not use atomic operations. In the current
+// runtime, this means all malloc size classes larger than the cutoff must
+// be multiples of four words. On 32-bit systems that's 16 bytes, and
+// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
+// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
+// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
+// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
+// must be >= 4.
+//
+// We used to use 16 because the GC programs do have some constant overhead
+// to get started, and processing 128 pointers seems to be enough to
+// amortize that overhead well.
+//
+// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
+// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
+// use bitmaps for objects up to 64 kB in size.
+//
+// Also known to reflect/type.go.
+//
+const maxPtrmaskBytes = 2048
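+
+// As a rough sanity check (editorial, not from the original comment):
+// 2048 bytes of ptrmask describe 2048*8 = 16384 pointer words, which is
+// 64 kB of pointer data with 4-byte words and 128 kB with 8-byte words.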
+
+// dgcsym emits and returns a data symbol containing GC information for type t,
+// along with a boolean reporting whether the UseGCProg bit should be set in
+// the type kind, and the ptrdata field to record in the reflect type information.
+func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+ ptrdata = typeptrdata(t)
+ if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
+ lsym = dgcptrmask(t)
+ return
+ }
+
+ useGCProg = true
+ lsym, ptrdata = dgcprog(t)
+ return
+}
+
+// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
+func dgcptrmask(t *types.Type) *obj.LSym {
+ ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
+ fillptrmask(t, ptrmask)
+ p := fmt.Sprintf("gcbits.%x", ptrmask)
+
+ sym := Runtimepkg.Lookup(p)
+ lsym := sym.Linksym()
+ if !sym.Uniq() {
+ sym.SetUniq(true)
+ for i, x := range ptrmask {
+ duint8(lsym, i, x)
+ }
+ ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ lsym.Set(obj.AttrContentAddressable, true)
+ }
+ return lsym
+}
+
+// fillptrmask fills in ptrmask with 1s corresponding to the
+// word offsets in t that hold pointers.
+// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
+func fillptrmask(t *types.Type, ptrmask []byte) {
+ for i := range ptrmask {
+ ptrmask[i] = 0
+ }
+ if !t.HasPointers() {
+ return
+ }
+
+ vec := bvalloc(8 * int32(len(ptrmask)))
+ onebitwalktype1(t, 0, vec)
+
+ nptr := typeptrdata(t) / int64(Widthptr)
+ for i := int64(0); i < nptr; i++ {
+ if vec.Get(int32(i)) {
+ ptrmask[i/8] |= 1 << (uint(i) % 8)
+ }
+ }
+}
+
+// dgcprog emits and returns the symbol containing a GC program for type t
+// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
+// In practice, the size is typeptrdata(t) except for non-trivial arrays.
+// For non-trivial arrays, the program describes the full t.Width size.
+func dgcprog(t *types.Type) (*obj.LSym, int64) {
+ dowidth(t)
+ if t.Width == BADWIDTH {
+ Fatalf("dgcprog: %v badwidth", t)
+ }
+ lsym := typesymprefix(".gcprog", t).Linksym()
+ var p GCProg
+ p.init(lsym)
+ p.emit(t, 0)
+ offset := p.w.BitIndex() * int64(Widthptr)
+ p.end()
+ if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
+ Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
+ }
+ return lsym, offset
+}
+
+type GCProg struct {
+ lsym *obj.LSym
+ symoff int
+ w gcprog.Writer
+}
+
+var Debug_gcprog int // set by -d gcprog
+
+func (p *GCProg) init(lsym *obj.LSym) {
+ p.lsym = lsym
+ p.symoff = 4 // first 4 bytes hold program length
+ p.w.Init(p.writeByte)
+ if Debug_gcprog > 0 {
+ fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
+ p.w.Debug(os.Stderr)
+ }
+}
+
+func (p *GCProg) writeByte(x byte) {
+ p.symoff = duint8(p.lsym, p.symoff, x)
+}
+
+func (p *GCProg) end() {
+ p.w.End()
+ duint32(p.lsym, 0, uint32(p.symoff-4))
+ ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ if Debug_gcprog > 0 {
+ fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
+ }
+}
+
+func (p *GCProg) emit(t *types.Type, offset int64) {
+ dowidth(t)
+ if !t.HasPointers() {
+ return
+ }
+ if t.Width == int64(Widthptr) {
+ p.w.Ptr(offset / int64(Widthptr))
+ return
+ }
+ switch t.Etype {
+ default:
+ Fatalf("GCProg.emit: unexpected type %v", t)
+
+ case TSTRING:
+ p.w.Ptr(offset / int64(Widthptr))
+
+ case TINTER:
+ // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
+ p.w.Ptr(offset/int64(Widthptr) + 1)
+
+ case TSLICE:
+ p.w.Ptr(offset / int64(Widthptr))
+
+ case TARRAY:
+ if t.NumElem() == 0 {
+ // should have been handled by haspointers check above
+ Fatalf("GCProg.emit: empty array")
+ }
+
+ // Flatten array-of-array-of-array to just a big array by multiplying counts.
+ count := t.NumElem()
+ elem := t.Elem()
+ for elem.IsArray() {
+ count *= elem.NumElem()
+ elem = elem.Elem()
+ }
+
+ if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
+ // Cheaper to just emit the bits.
+ for i := int64(0); i < count; i++ {
+ p.emit(elem, offset+i*elem.Width)
+ }
+ return
+ }
+ p.emit(elem, offset)
+ p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
+ p.w.Repeat(elem.Width/int64(Widthptr), count-1)
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ p.emit(t1.Type, offset+t1.Offset)
+ }
+ }
+}
+
+// zeroaddr returns the address of a symbol with at least
+// size bytes of zeros.
+func zeroaddr(size int64) *Node {
+ if size >= 1<<31 {
+ Fatalf("map elem too big %d", size)
+ }
+ if zerosize < size {
+ zerosize = size
+ }
+ s := mappkg.Lookup("zero")
+ if s.Def == nil {
+ x := newname(s)
+ x.Type = types.Types[TUINT8]
+ x.SetClass(PEXTERN)
+ x.SetTypecheck(1)
+ s.Def = asTypesNode(x)
+ }
+ z := nod(OADDR, asNode(s.Def), nil)
+ z.Type = types.NewPtr(types.Types[TUINT8])
+ z.SetTypecheck(1)
+ return z
+}
diff --git a/src/cmd/compile/internal/gc/reproduciblebuilds_test.go b/src/cmd/compile/internal/gc/reproduciblebuilds_test.go
new file mode 100644
index 0000000..8101e44
--- /dev/null
+++ b/src/cmd/compile/internal/gc/reproduciblebuilds_test.go
@@ -0,0 +1,112 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc_test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+func TestReproducibleBuilds(t *testing.T) {
+ tests := []string{
+ "issue20272.go",
+ "issue27013.go",
+ "issue30202.go",
+ }
+
+ testenv.MustHaveGoBuild(t)
+ iters := 10
+ if testing.Short() {
+ iters = 4
+ }
+ t.Parallel()
+ for _, test := range tests {
+ test := test
+ t.Run(test, func(t *testing.T) {
+ t.Parallel()
+ var want []byte
+ tmp, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("temp file creation failed: %v", err)
+ }
+ defer os.Remove(tmp.Name())
+ defer tmp.Close()
+ for i := 0; i < iters; i++ {
+ // Note: use -c 2 to expose any nondeterminism which is the result
+ // of the runtime scheduler.
+ out, err := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-c", "2", "-o", tmp.Name(), filepath.Join("testdata", "reproducible", test)).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to compile: %v\n%s", err, out)
+ }
+ obj, err := ioutil.ReadFile(tmp.Name())
+ if err != nil {
+ t.Fatalf("failed to read object file: %v", err)
+ }
+ if i == 0 {
+ want = obj
+ } else {
+ if !bytes.Equal(want, obj) {
+ t.Fatalf("builds produced different output after %d iters (%d bytes vs %d bytes)", i, len(want), len(obj))
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestIssue38068(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Compile a small package with and without the concurrent
+ // backend, then check to make sure that the resulting archives
+ // are identical. Note: this uses "go tool compile" instead of
+ // "go build" since the latter will generate different build IDs
+ // if it sees different command line flags.
+ scenarios := []struct {
+ tag string
+ args string
+ libpath string
+ }{
+ {tag: "serial", args: "-c=1"},
+ {tag: "concurrent", args: "-c=2"}}
+
+ tmpdir, err := ioutil.TempDir("", "TestIssue38068")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ src := filepath.Join("testdata", "reproducible", "issue38068.go")
+ for i := range scenarios {
+ s := &scenarios[i]
+ s.libpath = filepath.Join(tmpdir, s.tag+".a")
+ // Note: use of "-p" required in order for DWARF to be generated.
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-trimpath", "-p=issue38068", "-buildid=", s.args, "-o", s.libpath, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
+ }
+ }
+
+ readBytes := func(fn string) []byte {
+ payload, err := ioutil.ReadFile(fn)
+ if err != nil {
+ t.Fatalf("failed to read executable '%s': %v", fn, err)
+ }
+ return payload
+ }
+
+ b1 := readBytes(scenarios[0].libpath)
+ b2 := readBytes(scenarios[1].libpath)
+ if !bytes.Equal(b1, b2) {
+ t.Fatalf("concurrent and serial builds produced different output")
+ }
+}
diff --git a/src/cmd/compile/internal/gc/scc.go b/src/cmd/compile/internal/gc/scc.go
new file mode 100644
index 0000000..5c7935a
--- /dev/null
+++ b/src/cmd/compile/internal/gc/scc.go
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// Strongly connected components.
+//
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// by reverse topological order in the static call graph.
+// The algorithm (known as Tarjan's algorithm) for doing that is taken from
+// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
+//
+// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// This is more convenient for escape analysis.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
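+//
+// For example (an editorial illustration): if f is a lone self-recursive
+// function, visiting f records node number n and searches from n+1; the
+// recursive call back into f returns n, so min == n and the component {f}
+// is reported as recursive. If f never calls back into itself, min stays
+// at n+1 and the component is trivial.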
+
+type bottomUpVisitor struct {
+ analyze func([]*Node, bool)
+ visitgen uint32
+ nodeID map[*Node]uint32
+ stack []*Node
+}
+
+// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// It calls analyze with successive groups of functions, working from
+// the bottom of the call graph upward. Each time analyze is called with
+// a list of functions, every function on that list only calls other functions
+// on the list or functions that have been passed in previous invocations of
+// analyze. Closures appear in the same list as their outer functions.
+// The lists are as short as possible while preserving those requirements.
+// (In a typical program, many invocations of analyze will be passed just
+// a single function.) The boolean argument 'recursive' passed to analyze
+// specifies whether the functions on the list are mutually recursive.
+// If recursive is false, the list consists of only a single function and its closures.
+// If recursive is true, the list may still contain only a single function,
+// if that function is itself recursive.
+func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
+ var v bottomUpVisitor
+ v.analyze = analyze
+ v.nodeID = make(map[*Node]uint32)
+ for _, n := range list {
+ if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
+ v.visit(n)
+ }
+ }
+}
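+
+// A hypothetical usage sketch (editorial; fns stands for any slice of
+// ODCLFUNC nodes and the callback body is only illustrative):
+//
+//	visitBottomUp(fns, func(list []*Node, recursive bool) {
+//		// each call receives one strongly connected component, bottom up
+//	})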
+
+func (v *bottomUpVisitor) visit(n *Node) uint32 {
+ if id := v.nodeID[n]; id > 0 {
+ // already visited
+ return id
+ }
+
+ v.visitgen++
+ id := v.visitgen
+ v.nodeID[n] = id
+ v.visitgen++
+ min := v.visitgen
+ v.stack = append(v.stack, n)
+
+ inspectList(n.Nbody, func(n *Node) bool {
+ switch n.Op {
+ case ONAME:
+ if n.Class() == PFUNC {
+ if n.isMethodExpression() {
+ n = asNode(n.Type.Nname())
+ }
+ if n != nil && n.Name.Defn != nil {
+ if m := v.visit(n.Name.Defn); m < min {
+ min = m
+ }
+ }
+ }
+ case ODOTMETH:
+ fn := asNode(n.Type.Nname())
+ if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
+ if m := v.visit(fn.Name.Defn); m < min {
+ min = m
+ }
+ }
+ case OCALLPART:
+ fn := asNode(callpartMethod(n).Type.Nname())
+ if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
+ if m := v.visit(fn.Name.Defn); m < min {
+ min = m
+ }
+ }
+ case OCLOSURE:
+ if m := v.visit(n.Func.Closure); m < min {
+ min = m
+ }
+ }
+ return true
+ })
+
+ if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
+ // This node is the root of a strongly connected component.
+
+ // The search above started from v.nodeID[n]+1.
+ // If it found its way back to v.nodeID[n], then this
+ // block is a set of mutually recursive functions.
+ // Otherwise it's just a lone function that does not recurse.
+ recursive := min == id
+
+ // Remove connected component from stack.
+ // Mark their node IDs so that future visits return a large number
+ // so as not to affect the caller's min.
+
+ var i int
+ for i = len(v.stack) - 1; i >= 0; i-- {
+ x := v.stack[i]
+ if x == n {
+ break
+ }
+ v.nodeID[x] = ^uint32(0)
+ }
+ v.nodeID[n] = ^uint32(0)
+ block := v.stack[i:]
+ // Run the analysis on this set of functions.
+ v.stack = v.stack[:i]
+ v.analyze(block, recursive)
+ }
+
+ return min
+}
diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go
new file mode 100644
index 0000000..e66b859
--- /dev/null
+++ b/src/cmd/compile/internal/gc/scope.go
@@ -0,0 +1,109 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "sort"
+)
+
+// See golang.org/issue/20390.
+func xposBefore(p, q src.XPos) bool {
+ return Ctxt.PosTable.Pos(p).Before(Ctxt.PosTable.Pos(q))
+}
+
+func findScope(marks []Mark, pos src.XPos) ScopeID {
+ i := sort.Search(len(marks), func(i int) bool {
+ return xposBefore(pos, marks[i].Pos)
+ })
+ if i == 0 {
+ return 0
+ }
+ return marks[i-1].Scope
+}
+
+func assembleScopes(fnsym *obj.LSym, fn *Node, dwarfVars []*dwarf.Var, varScopes []ScopeID) []dwarf.Scope {
+ // Initialize the DWARF scope tree based on lexical scopes.
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
+ for i, parent := range fn.Func.Parents {
+ dwarfScopes[i+1].Parent = int32(parent)
+ }
+
+ scopeVariables(dwarfVars, varScopes, dwarfScopes)
+ scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+ return compactScopes(dwarfScopes)
+}
+
+// scopeVariables assigns DWARF variable records to their scopes.
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ScopeID, dwarfScopes []dwarf.Scope) {
+ sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
+
+ i0 := 0
+ for i := range dwarfVars {
+ if varScopes[i] == varScopes[i0] {
+ continue
+ }
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:i]
+ i0 = i
+ }
+ if i0 < len(dwarfVars) {
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:]
+ }
+}
+
+// scopePCs assigns PC ranges to their scopes.
+func scopePCs(fnsym *obj.LSym, marks []Mark, dwarfScopes []dwarf.Scope) {
+ // If there aren't any child scopes (in particular, when scope
+ // tracking is disabled), we can skip a whole lot of work.
+ if len(marks) == 0 {
+ return
+ }
+ p0 := fnsym.Func().Text
+ scope := findScope(marks, p0.Pos)
+ for p := p0; p != nil; p = p.Link {
+ if p.Pos == p0.Pos {
+ continue
+ }
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: p.Pc})
+ p0 = p
+ scope = findScope(marks, p0.Pos)
+ }
+ if p0.Pc < fnsym.Size {
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: fnsym.Size})
+ }
+}
+
+func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
+ // Reverse pass to propagate PC ranges to parent scopes.
+ for i := len(dwarfScopes) - 1; i > 0; i-- {
+ s := &dwarfScopes[i]
+ dwarfScopes[s.Parent].UnifyRanges(s)
+ }
+
+ return dwarfScopes
+}
+
+type varsByScopeAndOffset struct {
+ vars []*dwarf.Var
+ scopes []ScopeID
+}
+
+func (v varsByScopeAndOffset) Len() int {
+ return len(v.vars)
+}
+
+func (v varsByScopeAndOffset) Less(i, j int) bool {
+ if v.scopes[i] != v.scopes[j] {
+ return v.scopes[i] < v.scopes[j]
+ }
+ return v.vars[i].StackOffset < v.vars[j].StackOffset
+}
+
+func (v varsByScopeAndOffset) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
+}
diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/gc/scope_test.go
new file mode 100644
index 0000000..b0e038d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/scope_test.go
@@ -0,0 +1,538 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc_test
+
+import (
+ "cmd/internal/objfile"
+ "debug/dwarf"
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+type testline struct {
+ // line is one line of go source
+ line string
+
+ // scopes is a list of scope IDs of all the lexical scopes that this line
+ // of code belongs to.
+ // Scope IDs are assigned by traversing the tree of lexical blocks of a
+ // function in pre-order.
+ // Scope IDs are function specific, i.e. scope 0 is always the root scope
+ // of the function that this line belongs to. Empty scopes are not assigned
+ // an ID (because they are not saved in debug_info).
+ // Scope 0 is always omitted from this list since all lines always belong
+ // to it.
+ scopes []int
+
+ // vars is the list of variables that belong in scopes[len(scopes)-1].
+ // Local variables are prefixed with "var ", formal parameters with "arg ".
+ // Must be ordered alphabetically.
+ // Set to nil to skip the check.
+ vars []string
+
+ // decl is the list of variables declared at this line.
+ decl []string
+
+ // declBefore is the list of variables declared at or before this line.
+ declBefore []string
+}
+
+var testfile = []testline{
+ {line: "package main"},
+ {line: "func f1(x int) { }"},
+ {line: "func f2(x int) { }"},
+ {line: "func f3(x int) { }"},
+ {line: "func f4(x int) { }"},
+ {line: "func f5(x int) { }"},
+ {line: "func f6(x int) { }"},
+ {line: "func fi(x interface{}) { if a, ok := x.(error); ok { a.Error() } }"},
+ {line: "func gret1() int { return 2 }"},
+ {line: "func gretbool() bool { return true }"},
+ {line: "func gret3() (int, int, int) { return 0, 1, 2 }"},
+ {line: "var v = []int{ 0, 1, 2 }"},
+ {line: "var ch = make(chan int)"},
+ {line: "var floatch = make(chan float64)"},
+ {line: "var iface interface{}"},
+ {line: "func TestNestedFor() {", vars: []string{"var a int"}},
+ {line: " a := 0", decl: []string{"a"}},
+ {line: " f1(a)"},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f2(i)", scopes: []int{1}},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1, 2}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f3(i)", scopes: []int{1, 2}},
+ {line: " }"},
+ {line: " f4(i)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f5(a)"},
+ {line: "}"},
+ {line: "func TestOas2() {", vars: []string{}},
+ {line: " if a, b, c := gret3(); a != 1 {", scopes: []int{1}, vars: []string{"var a int", "var b int", "var c int"}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(c)", scopes: []int{1}},
+ {line: " }"},
+ {line: " for i, x := range v {", scopes: []int{2}, vars: []string{"var i int", "var x int"}},
+ {line: " f1(i)", scopes: []int{2}},
+ {line: " f1(x)", scopes: []int{2}},
+ {line: " }"},
+ {line: " if a, ok := <- ch; ok {", scopes: []int{3}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{3}},
+ {line: " }"},
+ {line: " if a, ok := iface.(int); ok {", scopes: []int{4}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestIfElse() {"},
+ {line: " if x := gret1(); x != 0 {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " a := 0", scopes: []int{1, 2}, vars: []string{"var a int"}},
+ {line: " f1(a); f1(x)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " b := 1", scopes: []int{1, 3}, vars: []string{"var b int"}},
+ {line: " f1(b); f1(x+1)", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSwitch() {", vars: []string{}},
+ {line: " switch x := gret1(); x {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case 0:", scopes: []int{1, 2}},
+ {line: " i := x + 5", scopes: []int{1, 2}, vars: []string{"var i int"}},
+ {line: " f1(x); f1(i)", scopes: []int{1, 2}},
+ {line: " case 1:", scopes: []int{1, 3}},
+ {line: " j := x + 10", scopes: []int{1, 3}, vars: []string{"var j int"}},
+ {line: " f1(x); f1(j)", scopes: []int{1, 3}},
+ {line: " case 2:", scopes: []int{1, 4}},
+ {line: " k := x + 2", scopes: []int{1, 4}, vars: []string{"var k int"}},
+ {line: " f1(x); f1(k)", scopes: []int{1, 4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestTypeSwitch() {", vars: []string{}},
+ {line: " switch x := iface.(type) {"},
+ {line: " case int:", scopes: []int{1}},
+ {line: " f1(x)", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case uint8:", scopes: []int{2}},
+ {line: " f1(int(x))", scopes: []int{2}, vars: []string{"var x uint8"}},
+ {line: " case float64:", scopes: []int{3}},
+ {line: " f1(int(x)+1)", scopes: []int{3}, vars: []string{"var x float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSelectScope() {"},
+ {line: " select {"},
+ {line: " case i := <- ch:", scopes: []int{1}},
+ {line: " f1(i)", scopes: []int{1}, vars: []string{"var i int"}},
+ {line: " case f := <- floatch:", scopes: []int{2}},
+ {line: " f1(int(f))", scopes: []int{2}, vars: []string{"var f float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestBlock() {", vars: []string{"var a int"}},
+ {line: " a := 1"},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestDiscontiguousRanges() {", vars: []string{"var a int"}},
+ {line: " a := 0"},
+ {line: " f1(a)"},
+ {line: " {"},
+ {line: " b := 0", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f2(b)", scopes: []int{1}},
+ {line: " if gretbool() {", scopes: []int{1}},
+ {line: " c := 0", scopes: []int{1, 2}, vars: []string{"var c int"}},
+ {line: " f3(c)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " c := 1.1", scopes: []int{1, 3}, vars: []string{"var c float64"}},
+ {line: " f4(int(c))", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: " f5(b)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f6(a)"},
+ {line: "}"},
+ {line: "func TestClosureScope() {", vars: []string{"var a int", "var b int", "var f func(int)"}},
+ {line: " a := 1; b := 1"},
+ {line: " f := func(c int) {", scopes: []int{0}, vars: []string{"arg c int", "var &b *int", "var a int", "var d int"}, declBefore: []string{"&b", "a"}},
+ {line: " d := 3"},
+ {line: " f1(c); f1(d)"},
+ {line: " if e := 3; e != 0 {", scopes: []int{1}, vars: []string{"var e int"}},
+ {line: " f1(e)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " b = 2", scopes: []int{1}},
+ {line: " }"},
+ {line: " }"},
+ {line: " f(3); f1(b)"},
+ {line: "}"},
+ {line: "func TestEscape() {"},
+ {line: " a := 1", vars: []string{"var a int"}},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}},
+ {line: " p := &b", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " fi(p)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestCaptureVar(flag bool) func() int {"},
+ {line: " a := 1", vars: []string{"arg flag bool", "arg ~r1 func() int", "var a int"}},
+ {line: " if flag {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}},
+ {line: " f := func() int {", scopes: []int{1, 0}},
+ {line: " return b + 1"},
+ {line: " }"},
+ {line: " return f", scopes: []int{1}},
+ {line: " }"},
+ {line: " f1(a)"},
+ {line: " return nil"},
+ {line: "}"},
+ {line: "func main() {"},
+ {line: " TestNestedFor()"},
+ {line: " TestOas2()"},
+ {line: " TestIfElse()"},
+ {line: " TestSwitch()"},
+ {line: " TestTypeSwitch()"},
+ {line: " TestSelectScope()"},
+ {line: " TestBlock()"},
+ {line: " TestDiscontiguousRanges()"},
+ {line: " TestClosureScope()"},
+ {line: " TestEscape()"},
+ {line: " TestCaptureVar(true)"},
+ {line: "}"},
+}
+
+const detailOutput = false
+
+// TestScopeRanges compiles testfile and checks that the description of lexical blocks emitted
+// by the linker in debug_info, for each function in the main package,
+// corresponds to what we expect it to be.
+func TestScopeRanges(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "TestScopeRanges")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src, f := gobuild(t, dir, false, testfile)
+ defer f.Close()
+
+ // the compiler uses forward slashes for paths even on windows
+ src = strings.Replace(src, "\\", "/", -1)
+
+ pcln, err := f.PCLineTable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ lines := make(map[line][]*lexblock)
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ if entry.Tag != dwarf.TagSubprogram {
+ continue
+ }
+
+ name, ok := entry.Val(dwarf.AttrName).(string)
+ if !ok || !strings.HasPrefix(name, "main.Test") {
+ continue
+ }
+
+ var scope lexblock
+ ctxt := scopexplainContext{
+ dwarfData: dwarfData,
+ dwarfReader: dwarfReader,
+ scopegen: 1,
+ }
+
+ readScope(&ctxt, &scope, entry)
+
+ scope.markLines(pcln, lines)
+ }
+
+ anyerror := false
+ for i := range testfile {
+ tgt := testfile[i].scopes
+ out := lines[line{src, i + 1}]
+
+ if detailOutput {
+ t.Logf("%s // %v", testfile[i].line, out)
+ }
+
+ scopesok := checkScopes(tgt, out)
+ if !scopesok {
+ t.Logf("mismatch at line %d %q: expected: %v got: %v\n", i, testfile[i].line, tgt, scopesToString(out))
+ }
+
+ varsok := true
+ if testfile[i].vars != nil {
+ if len(out) > 0 {
+ varsok = checkVars(testfile[i].vars, out[len(out)-1].vars)
+ if !varsok {
+ t.Logf("variable mismatch at line %d %q for scope %d: expected: %v got: %v\n", i+1, testfile[i].line, out[len(out)-1].id, testfile[i].vars, out[len(out)-1].vars)
+ }
+ for j := range testfile[i].decl {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].decl[j]); line != i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d got: %d", testfile[i].decl[j], i+1, line)
+ }
+ }
+
+ for j := range testfile[i].declBefore {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].declBefore[j]); line > i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d (or less) got: %d", testfile[i].declBefore[j], i+1, line)
+ }
+ }
+ }
+ }
+
+ anyerror = anyerror || !scopesok || !varsok
+ }
+
+ if anyerror {
+ t.Fatalf("mismatched output")
+ }
+}
+
+func scopesToString(v []*lexblock) string {
+ r := make([]string, len(v))
+ for i, s := range v {
+ r[i] = strconv.Itoa(s.id)
+ }
+ return "[ " + strings.Join(r, ", ") + " ]"
+}
+
+func checkScopes(tgt []int, out []*lexblock) bool {
+ if len(out) > 0 {
+ // omit scope 0
+ out = out[1:]
+ }
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].id {
+ return false
+ }
+ }
+ return true
+}
+
+func checkVars(tgt []string, out []variable) bool {
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].expr {
+ return false
+ }
+ }
+ return true
+}
+
+func declLineForVar(scope []variable, name string) int {
+ for i := range scope {
+ if scope[i].name() == name {
+ return scope[i].declLine
+ }
+ }
+ return -1
+}
+
+type lexblock struct {
+ id int
+ ranges [][2]uint64
+ vars []variable
+ scopes []lexblock
+}
+
+type variable struct {
+ expr string
+ declLine int
+}
+
+func (v *variable) name() string {
+ return strings.Split(v.expr, " ")[1]
+}
+
+type line struct {
+ file string
+ lineno int
+}
+
+type scopexplainContext struct {
+ dwarfData *dwarf.Data
+ dwarfReader *dwarf.Reader
+ scopegen int
+}
+
+// readScope reads the DW_TAG_lexical_block or the DW_TAG_subprogram in
+// entry and writes a description in scope.
+// Nested DW_TAG_lexical_block entries are read recursively.
+func readScope(ctxt *scopexplainContext, scope *lexblock, entry *dwarf.Entry) {
+ var err error
+ scope.ranges, err = ctxt.dwarfData.Ranges(entry)
+ if err != nil {
+ panic(err)
+ }
+ for {
+ e, err := ctxt.dwarfReader.Next()
+ if err != nil {
+ panic(err)
+ }
+ switch e.Tag {
+ case 0:
+ sort.Slice(scope.vars, func(i, j int) bool {
+ return scope.vars[i].expr < scope.vars[j].expr
+ })
+ return
+ case dwarf.TagFormalParameter:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "arg", typ))
+ case dwarf.TagVariable:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "var", typ))
+ case dwarf.TagLexDwarfBlock:
+ scope.scopes = append(scope.scopes, lexblock{id: ctxt.scopegen})
+ ctxt.scopegen++
+ readScope(ctxt, &scope.scopes[len(scope.scopes)-1], e)
+ }
+ }
+}
+
+func entryToVar(e *dwarf.Entry, kind string, typ dwarf.Type) variable {
+ return variable{
+ fmt.Sprintf("%s %s %s", kind, e.Val(dwarf.AttrName).(string), typ.String()),
+ int(e.Val(dwarf.AttrDeclLine).(int64)),
+ }
+}
+
+// markLines marks all lines that belong to this scope with this scope.
+// It recursively calls markLines for all child scopes.
+func (scope *lexblock) markLines(pcln objfile.Liner, lines map[line][]*lexblock) {
+ for _, r := range scope.ranges {
+ for pc := r[0]; pc < r[1]; pc++ {
+ file, lineno, _ := pcln.PCToLine(pc)
+ l := line{file, lineno}
+ if len(lines[l]) == 0 || lines[l][len(lines[l])-1] != scope {
+ lines[l] = append(lines[l], scope)
+ }
+ }
+ }
+
+ for i := range scope.scopes {
+ scope.scopes[i].markLines(pcln, lines)
+ }
+}
+
+func gobuild(t *testing.T, dir string, optimized bool, testfile []testline) (string, *objfile.File) {
+ src := filepath.Join(dir, "test.go")
+ dst := filepath.Join(dir, "out.o")
+
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := range testfile {
+ f.Write([]byte(testfile[i].line))
+ f.Write([]byte{'\n'})
+ }
+ f.Close()
+
+ args := []string{"build"}
+ if !optimized {
+ args = append(args, "-gcflags=-N -l")
+ }
+ args = append(args, "-o", dst, src)
+
+ cmd := exec.Command(testenv.GoToolPath(t), args...)
+ if b, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("build: %s\n", string(b))
+ t.Fatal(err)
+ }
+
+ pkg, err := objfile.Open(dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return src, pkg
+}
+
+// TestEmptyDwarfRanges tests that no list entry in debug_ranges has start == end.
+// See issue #23928.
+func TestEmptyDwarfRanges(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "TestEmptyDwarfRanges")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ _, f := gobuild(t, dir, true, []testline{{line: "package main"}, {line: "func main(){ println(\"hello\") }"}})
+ defer f.Close()
+
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ ranges, err := dwarfData.Ranges(entry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ranges == nil {
+ continue
+ }
+
+ for _, rng := range ranges {
+ if rng[0] == rng[1] {
+ t.Errorf("range entry with start == end: %v", rng)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go
new file mode 100644
index 0000000..97e0424
--- /dev/null
+++ b/src/cmd/compile/internal/gc/select.go
@@ -0,0 +1,387 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/compile/internal/types"
+
+// select
+func typecheckselect(sel *Node) {
+ var def *Node
+ lno := setlineno(sel)
+ typecheckslice(sel.Ninit.Slice(), ctxStmt)
+ for _, ncase := range sel.List.Slice() {
+ if ncase.Op != OCASE {
+ setlineno(ncase)
+ Fatalf("typecheckselect %v", ncase.Op)
+ }
+
+ if ncase.List.Len() == 0 {
+ // default
+ if def != nil {
+ yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line())
+ } else {
+ def = ncase
+ }
+ } else if ncase.List.Len() > 1 {
+ yyerrorl(ncase.Pos, "select cases cannot be lists")
+ } else {
+ ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
+ n := ncase.List.First()
+ ncase.Left = n
+ ncase.List.Set(nil)
+ switch n.Op {
+ default:
+ pos := n.Pos
+ if n.Op == ONAME {
+ // We don't have the right position for ONAME nodes (see #15459 and
+ // others). Using ncase.Pos for now as it will provide the correct
+ // line number (assuming the expression follows the "case" keyword
+ // on the same line). This matches the approach before 1.10.
+ pos = ncase.Pos
+ }
+ yyerrorl(pos, "select case must be receive, send or assign recv")
+
+ // convert x = <-c into OSELRECV(x, <-c).
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ case OAS:
+ if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit() {
+ n.Right = n.Right.Left
+ }
+
+ if n.Right.Op != ORECV {
+ yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ break
+ }
+
+ n.Op = OSELRECV
+
+ // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
+ case OAS2RECV:
+ if n.Right.Op != ORECV {
+ yyerrorl(n.Pos, "select assignment must have receive on right hand side")
+ break
+ }
+
+ n.Op = OSELRECV2
+ n.Left = n.List.First()
+ n.List.Set1(n.List.Second())
+
+ // convert <-c into OSELRECV(N, <-c)
+ case ORECV:
+ n = nodl(n.Pos, OSELRECV, nil, n)
+
+ n.SetTypecheck(1)
+ ncase.Left = n
+
+ case OSEND:
+ break
+ }
+ }
+
+ typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ }
+
+ lineno = lno
+}
+
+func walkselect(sel *Node) {
+ lno := setlineno(sel)
+ if sel.Nbody.Len() != 0 {
+ Fatalf("double walkselect")
+ }
+
+ init := sel.Ninit.Slice()
+ sel.Ninit.Set(nil)
+
+ init = append(init, walkselectcases(&sel.List)...)
+ sel.List.Set(nil)
+
+ sel.Nbody.Set(init)
+ walkstmtlist(sel.Nbody.Slice())
+
+ lineno = lno
+}
+
+func walkselectcases(cases *Nodes) []*Node {
+ ncas := cases.Len()
+ sellineno := lineno
+
+ // optimization: zero-case select
+ if ncas == 0 {
+ return []*Node{mkcall("block", nil, nil)}
+ }
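+
+ // (Illustrative note, not part of the original change: an empty
+ // "select {}" statement has zero cases, so it compiles to a single
+ // call to the runtime's block function and parks the goroutine forever.)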
+
+ // optimization: one-case select: single op.
+ if ncas == 1 {
+ cas := cases.First()
+ setlineno(cas)
+ l := cas.Ninit.Slice()
+ if cas.Left != nil { // not default:
+ n := cas.Left
+ l = append(l, n.Ninit.Slice()...)
+ n.Ninit.Set(nil)
+ switch n.Op {
+ default:
+ Fatalf("select %v", n.Op)
+
+ case OSEND:
+ // already ok
+
+ case OSELRECV, OSELRECV2:
+ if n.Op == OSELRECV || n.List.Len() == 0 {
+ if n.Left == nil {
+ n = n.Right
+ } else {
+ n.Op = OAS
+ }
+ break
+ }
+
+ if n.Left == nil {
+ nblank = typecheck(nblank, ctxExpr|ctxAssign)
+ n.Left = nblank
+ }
+
+ n.Op = OAS2
+ n.List.Prepend(n.Left)
+ n.Rlist.Set1(n.Right)
+ n.Right = nil
+ n.Left = nil
+ n.SetTypecheck(0)
+ n = typecheck(n, ctxStmt)
+ }
+
+ l = append(l, n)
+ }
+
+ l = append(l, cas.Nbody.Slice()...)
+ l = append(l, nod(OBREAK, nil, nil))
+ return l
+ }
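+
+ // (Illustrative note, not part of the original change: a select with a
+ // single non-default case, e.g. "select { case v = <-c: use(v) }", is
+ // rewritten above into the ordinary statements "v = <-c; use(v); break",
+ // bypassing the runtime select machinery entirely; c, v and use are
+ // placeholder names.)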
+
+ // convert case value arguments to addresses.
+ // this rewrite is used by both the general code and the next optimization.
+ var dflt *Node
+ for _, cas := range cases.Slice() {
+ setlineno(cas)
+ n := cas.Left
+ if n == nil {
+ dflt = cas
+ continue
+ }
+ switch n.Op {
+ case OSEND:
+ n.Right = nod(OADDR, n.Right, nil)
+ n.Right = typecheck(n.Right, ctxExpr)
+
+ case OSELRECV, OSELRECV2:
+ if n.Op == OSELRECV2 && n.List.Len() == 0 {
+ n.Op = OSELRECV
+ }
+
+ if n.Left != nil {
+ n.Left = nod(OADDR, n.Left, nil)
+ n.Left = typecheck(n.Left, ctxExpr)
+ }
+ }
+ }
+
+ // optimization: two-case select but one is default: single non-blocking op.
+ if ncas == 2 && dflt != nil {
+ cas := cases.First()
+ if cas == dflt {
+ cas = cases.Second()
+ }
+
+ n := cas.Left
+ setlineno(n)
+ r := nod(OIF, nil, nil)
+ r.Ninit.Set(cas.Ninit.Slice())
+ switch n.Op {
+ default:
+ Fatalf("select %v", n.Op)
+
+ case OSEND:
+ // if selectnbsend(c, v) { body } else { default body }
+ ch := n.Left
+ r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[TBOOL], &r.Ninit, ch, n.Right)
+
+ case OSELRECV:
+ // if selectnbrecv(&v, c) { body } else { default body }
+ ch := n.Right.Left
+ elem := n.Left
+ if elem == nil {
+ elem = nodnil()
+ }
+ r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, ch)
+
+ case OSELRECV2:
+ // if selectnbrecv2(&v, &received, c) { body } else { default body }
+ ch := n.Right.Left
+ elem := n.Left
+ if elem == nil {
+ elem = nodnil()
+ }
+ receivedp := nod(OADDR, n.List.First(), nil)
+ receivedp = typecheck(receivedp, ctxExpr)
+ r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[TBOOL], &r.Ninit, elem, receivedp, ch)
+ }
+
+ r.Left = typecheck(r.Left, ctxExpr)
+ r.Nbody.Set(cas.Nbody.Slice())
+ r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
+ return []*Node{r, nod(OBREAK, nil, nil)}
+ }
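+
+ // (Illustrative note, not part of the original change: the two-case form
+ // handled above corresponds to source such as
+ //
+ //	select {
+ //	case c <- v:
+ //		sent()
+ //	default:
+ //		full()
+ //	}
+ //
+ // which is lowered to roughly "if selectnbsend(c, v) { sent() } else { full() }",
+ // matching the per-op comments in the switch; c, v, sent and full are
+ // placeholder names.)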
+
+ if dflt != nil {
+ ncas--
+ }
+ casorder := make([]*Node, ncas)
+ nsends, nrecvs := 0, 0
+
+ var init []*Node
+
+ // generate sel-struct
+ lineno = sellineno
+ selv := temp(types.NewArray(scasetype(), int64(ncas)))
+ r := nod(OAS, selv, nil)
+ r = typecheck(r, ctxStmt)
+ init = append(init, r)
+
+ // No initialization for order; runtime.selectgo is responsible for that.
+ order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
+
+ var pc0, pcs *Node
+ if flag_race {
+ pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
+ pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
+ } else {
+ pc0 = nodnil()
+ }
+
+ // register cases
+ for _, cas := range cases.Slice() {
+ setlineno(cas)
+
+ init = append(init, cas.Ninit.Slice()...)
+ cas.Ninit.Set(nil)
+
+ n := cas.Left
+ if n == nil { // default:
+ continue
+ }
+
+ var i int
+ var c, elem *Node
+ switch n.Op {
+ default:
+ Fatalf("select %v", n.Op)
+ case OSEND:
+ i = nsends
+ nsends++
+ c = n.Left
+ elem = n.Right
+ case OSELRECV, OSELRECV2:
+ nrecvs++
+ i = ncas - nrecvs
+ c = n.Right.Left
+ elem = n.Left
+ }
+
+ casorder[i] = cas
+
+ setField := func(f string, val *Node) {
+ r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
+ r = typecheck(r, ctxStmt)
+ init = append(init, r)
+ }
+
+ c = convnop(c, types.Types[TUNSAFEPTR])
+ setField("c", c)
+ if elem != nil {
+ elem = convnop(elem, types.Types[TUNSAFEPTR])
+ setField("elem", elem)
+ }
+
+ // TODO(mdempsky): There should be a cleaner way to
+ // handle this.
+ if flag_race {
+ r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
+ init = append(init, r)
+ }
+ }
+ if nsends+nrecvs != ncas {
+ Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ }
+
+ // run the select
+ lineno = sellineno
+ chosen := temp(types.Types[TINT])
+ recvOK := temp(types.Types[TBOOL])
+ r = nod(OAS2, nil, nil)
+ r.List.Set2(chosen, recvOK)
+ fn := syslook("selectgo")
+ r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
+ r = typecheck(r, ctxStmt)
+ init = append(init, r)
+
+ // selv and order are no longer alive after selectgo.
+ init = append(init, nod(OVARKILL, selv, nil))
+ init = append(init, nod(OVARKILL, order, nil))
+ if flag_race {
+ init = append(init, nod(OVARKILL, pcs, nil))
+ }
+
+ // dispatch cases
+ dispatch := func(cond, cas *Node) {
+ cond = typecheck(cond, ctxExpr)
+ cond = defaultlit(cond, nil)
+
+ r := nod(OIF, cond, nil)
+
+ if n := cas.Left; n != nil && n.Op == OSELRECV2 {
+ x := nod(OAS, n.List.First(), recvOK)
+ x = typecheck(x, ctxStmt)
+ r.Nbody.Append(x)
+ }
+
+ r.Nbody.AppendNodes(&cas.Nbody)
+ r.Nbody.Append(nod(OBREAK, nil, nil))
+ init = append(init, r)
+ }
+
+ if dflt != nil {
+ setlineno(dflt)
+ dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
+ }
+ for i, cas := range casorder {
+ setlineno(cas)
+ dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
+ }
+
+ return init
+}
+
+// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
+func bytePtrToIndex(n *Node, i int64) *Node {
+ s := nod(OADDR, nod(OINDEX, n, nodintconst(i)), nil)
+ t := types.NewPtr(types.Types[TUINT8])
+ return convnop(s, t)
+}
+
+var scase *types.Type
+
+// Keep in sync with src/runtime/select.go.
+func scasetype() *types.Type {
+ if scase == nil {
+ scase = tostruct([]*Node{
+ namedfield("c", types.Types[TUNSAFEPTR]),
+ namedfield("elem", types.Types[TUNSAFEPTR]),
+ })
+ scase.SetNoalg(true)
+ }
+ return scase
+}
diff --git a/src/cmd/compile/internal/gc/shift_test.go b/src/cmd/compile/internal/gc/shift_test.go
new file mode 100644
index 0000000..ce2eedf
--- /dev/null
+++ b/src/cmd/compile/internal/gc/shift_test.go
@@ -0,0 +1,1031 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Tests shifts of zero.
+
+//go:noinline
+func ofz64l64(n uint64) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l32(n uint32) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l16(n uint16) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l8(n uint8) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64r64(n uint64) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r32(n uint32) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r16(n uint16) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r8(n uint8) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur64(n uint64) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur32(n uint32) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur16(n uint16) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur8(n uint8) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz32l64(n uint64) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l32(n uint32) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l16(n uint16) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l8(n uint8) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32r64(n uint64) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r32(n uint32) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r16(n uint16) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r8(n uint8) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur64(n uint64) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur32(n uint32) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur16(n uint16) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur8(n uint8) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz16l64(n uint64) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l32(n uint32) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l16(n uint16) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l8(n uint8) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16r64(n uint64) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r32(n uint32) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r16(n uint16) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r8(n uint8) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur64(n uint64) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur32(n uint32) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur16(n uint16) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur8(n uint8) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz8l64(n uint64) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l32(n uint32) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l16(n uint16) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l8(n uint8) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8r64(n uint64) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r32(n uint32) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r16(n uint16) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r8(n uint8) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur64(n uint64) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur32(n uint32) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur16(n uint16) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur8(n uint8) uint8 {
+ var x uint8
+ return x >> n
+}
+
+func TestShiftOfZero(t *testing.T) {
+ if got := ofz64l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz32l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz16l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz8l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+}
+
+//go:noinline
+func byz64l(n int64) int64 {
+ return n << 0
+}
+
+//go:noinline
+func byz64r(n int64) int64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz64ur(n uint64) uint64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32l(n int32) int32 {
+ return n << 0
+}
+
+//go:noinline
+func byz32r(n int32) int32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32ur(n uint32) uint32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16l(n int16) int16 {
+ return n << 0
+}
+
+//go:noinline
+func byz16r(n int16) int16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16ur(n uint16) uint16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8l(n int8) int8 {
+ return n << 0
+}
+
+//go:noinline
+func byz8r(n int8) int8 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8ur(n uint8) uint8 {
+ return n >> 0
+}
+
+func TestShiftByZero(t *testing.T) {
+ {
+ var n int64 = 0x5555555555555555
+ if got := byz64l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz64r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint64 = 0xaaaaaaaaaaaaaaaa
+ if got := byz64ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int32 = 0x55555555
+ if got := byz32l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz32r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint32 = 0xaaaaaaaa
+ if got := byz32ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int16 = 0x5555
+ if got := byz16l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz16r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint16 = 0xaaaa
+ if got := byz16ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int8 = 0x55
+ if got := byz8l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz8r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint8 = 0x55
+ if got := byz8ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+}
+
+//go:noinline
+func two64l(x int64) int64 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two64r(x int64) int64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two64ur(x uint64) uint64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32l(x int32) int32 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two32r(x int32) int32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32ur(x uint32) uint32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16l(x int16) int16 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two16r(x int16) int16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16ur(x uint16) uint16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8l(x int8) int8 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two8r(x int8) int8 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8ur(x uint8) uint8 {
+ return x >> 1 >> 1
+}
+
+func TestShiftCombine(t *testing.T) {
+ if got, want := two64l(4), int64(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two64r(64), int64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two64ur(64), uint64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32l(4), int32(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two32r(64), int32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32ur(64), uint32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16l(4), int16(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two16r(64), int16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16ur(64), uint16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8l(4), int8(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two8r(64), int8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8ur(64), uint8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+}
+
+//go:noinline
+func three64l(x int64) int64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64ul(x uint64) uint64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64r(x int64) int64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three64ur(x uint64) uint64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32l(x int32) int32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32ul(x uint32) uint32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32r(x int32) int32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32ur(x uint32) uint32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16l(x int16) int16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16ul(x uint16) uint16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16r(x int16) int16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16ur(x uint16) uint16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8l(x int8) int8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8ul(x uint8) uint8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8r(x int8) int8 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8ur(x uint8) uint8 {
+ return x >> 3 << 1 >> 2
+}
+
+func TestShiftCombine3(t *testing.T) {
+ if got, want := three64l(4), int64(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three64ul(4), uint64(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three64r(64), int64(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three64ur(64), uint64(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three32l(4), int32(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three32ul(4), uint32(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three32r(64), int32(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three32ur(64), uint32(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three16l(4), int16(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three16ul(4), uint16(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three16r(64), int16(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three16ur(64), uint16(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three8l(4), int8(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three8ul(4), uint8(64); want != got {
+ t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+ }
+ if got, want := three8r(64), int8(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+ if got, want := three8ur(64), uint8(4); want != got {
+ t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+ }
+}
+
+var (
+ one64 int64 = 1
+ one64u uint64 = 1
+ one32 int32 = 1
+ one32u uint32 = 1
+ one16 int16 = 1
+ one16u uint16 = 1
+ one8 int8 = 1
+ one8u uint8 = 1
+)
+
+func TestShiftLargeCombine(t *testing.T) {
+ var N uint64 = 0x8000000000000000
+ if one64<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftLargeCombine3(t *testing.T) {
+ var N uint64 = 0x8000000000000001
+ if one64<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftGeneric(t *testing.T) {
+ for _, test := range [...]struct {
+ valueWidth int
+ signed bool
+ shiftWidth int
+ left bool
+ f interface{}
+ }{
+ {64, true, 64, true, func(n int64, s uint64) int64 { return n << s }},
+ {64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }},
+ {64, false, 64, false, func(n uint64, s uint64) uint64 { return n >> s }},
+ {64, true, 32, true, func(n int64, s uint32) int64 { return n << s }},
+ {64, true, 32, false, func(n int64, s uint32) int64 { return n >> s }},
+ {64, false, 32, false, func(n uint64, s uint32) uint64 { return n >> s }},
+ {64, true, 16, true, func(n int64, s uint16) int64 { return n << s }},
+ {64, true, 16, false, func(n int64, s uint16) int64 { return n >> s }},
+ {64, false, 16, false, func(n uint64, s uint16) uint64 { return n >> s }},
+ {64, true, 8, true, func(n int64, s uint8) int64 { return n << s }},
+ {64, true, 8, false, func(n int64, s uint8) int64 { return n >> s }},
+ {64, false, 8, false, func(n uint64, s uint8) uint64 { return n >> s }},
+
+ {32, true, 64, true, func(n int32, s uint64) int32 { return n << s }},
+ {32, true, 64, false, func(n int32, s uint64) int32 { return n >> s }},
+ {32, false, 64, false, func(n uint32, s uint64) uint32 { return n >> s }},
+ {32, true, 32, true, func(n int32, s uint32) int32 { return n << s }},
+ {32, true, 32, false, func(n int32, s uint32) int32 { return n >> s }},
+ {32, false, 32, false, func(n uint32, s uint32) uint32 { return n >> s }},
+ {32, true, 16, true, func(n int32, s uint16) int32 { return n << s }},
+ {32, true, 16, false, func(n int32, s uint16) int32 { return n >> s }},
+ {32, false, 16, false, func(n uint32, s uint16) uint32 { return n >> s }},
+ {32, true, 8, true, func(n int32, s uint8) int32 { return n << s }},
+ {32, true, 8, false, func(n int32, s uint8) int32 { return n >> s }},
+ {32, false, 8, false, func(n uint32, s uint8) uint32 { return n >> s }},
+
+ {16, true, 64, true, func(n int16, s uint64) int16 { return n << s }},
+ {16, true, 64, false, func(n int16, s uint64) int16 { return n >> s }},
+ {16, false, 64, false, func(n uint16, s uint64) uint16 { return n >> s }},
+ {16, true, 32, true, func(n int16, s uint32) int16 { return n << s }},
+ {16, true, 32, false, func(n int16, s uint32) int16 { return n >> s }},
+ {16, false, 32, false, func(n uint16, s uint32) uint16 { return n >> s }},
+ {16, true, 16, true, func(n int16, s uint16) int16 { return n << s }},
+ {16, true, 16, false, func(n int16, s uint16) int16 { return n >> s }},
+ {16, false, 16, false, func(n uint16, s uint16) uint16 { return n >> s }},
+ {16, true, 8, true, func(n int16, s uint8) int16 { return n << s }},
+ {16, true, 8, false, func(n int16, s uint8) int16 { return n >> s }},
+ {16, false, 8, false, func(n uint16, s uint8) uint16 { return n >> s }},
+
+ {8, true, 64, true, func(n int8, s uint64) int8 { return n << s }},
+ {8, true, 64, false, func(n int8, s uint64) int8 { return n >> s }},
+ {8, false, 64, false, func(n uint8, s uint64) uint8 { return n >> s }},
+ {8, true, 32, true, func(n int8, s uint32) int8 { return n << s }},
+ {8, true, 32, false, func(n int8, s uint32) int8 { return n >> s }},
+ {8, false, 32, false, func(n uint8, s uint32) uint8 { return n >> s }},
+ {8, true, 16, true, func(n int8, s uint16) int8 { return n << s }},
+ {8, true, 16, false, func(n int8, s uint16) int8 { return n >> s }},
+ {8, false, 16, false, func(n uint8, s uint16) uint8 { return n >> s }},
+ {8, true, 8, true, func(n int8, s uint8) int8 { return n << s }},
+ {8, true, 8, false, func(n int8, s uint8) int8 { return n >> s }},
+ {8, false, 8, false, func(n uint8, s uint8) uint8 { return n >> s }},
+ } {
+ fv := reflect.ValueOf(test.f)
+ var args [2]reflect.Value
+ for i := 0; i < test.valueWidth; i++ {
+ // Build value to be shifted.
+ var n int64 = 1
+ for j := 0; j < i; j++ {
+ n <<= 1
+ }
+ args[0] = reflect.ValueOf(n).Convert(fv.Type().In(0))
+ for s := 0; s <= test.shiftWidth; s++ {
+ args[1] = reflect.ValueOf(s).Convert(fv.Type().In(1))
+
+ // Compute desired result. We're testing variable shifts
+ // assuming constant shifts are correct.
+ r := n
+ var op string
+ switch {
+ case test.left:
+ op = "<<"
+ for j := 0; j < s; j++ {
+ r <<= 1
+ }
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ case test.signed:
+ op = ">>"
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ for j := 0; j < s; j++ {
+ r >>= 1
+ }
+ default:
+ op = ">>>"
+ for j := 0; j < s; j++ {
+ r = int64(uint64(r) >> 1)
+ }
+ }
+
+ // Call function.
+ res := fv.Call(args[:])[0].Convert(reflect.ValueOf(r).Type())
+
+ if res.Int() != r {
+ t.Errorf("%s%dx%d(%x,%x)=%x, want %x", op, test.valueWidth, test.shiftWidth, n, s, res.Int(), r)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
new file mode 100644
index 0000000..4d0837b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -0,0 +1,1172 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "fmt"
+)
+
+type InitEntry struct {
+ Xoffset int64 // struct, array only
+ Expr *Node // bytes of run-time computed expressions
+}
+
+type InitPlan struct {
+ E []InitEntry
+}
+
+// An InitSchedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
+type InitSchedule struct {
+ // out is the ordered list of dynamic initialization
+ // statements.
+ out []*Node
+
+ initplans map[*Node]*InitPlan
+ inittemps map[*Node]*Node
+}
+
+func (s *InitSchedule) append(n *Node) {
+ s.out = append(s.out, n)
+}
+
+// staticInit adds an initialization statement n to the schedule.
+func (s *InitSchedule) staticInit(n *Node) {
+ if !s.tryStaticInit(n) {
+ if Debug.P != 0 {
+ Dump("nonstatic", n)
+ }
+ s.append(n)
+ }
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *InitSchedule) tryStaticInit(n *Node) bool {
+ // Only worry about simple "l = r" assignments. Multiple
+ // variable/expression OAS2 assignments have already been
+ // replaced by multiple simple OAS assignments, and the other
+ // OAS2* assignments mostly necessitate dynamic execution
+ // anyway.
+ if n.Op != OAS {
+ return false
+ }
+ if n.Left.isBlank() && candiscard(n.Right) {
+ return true
+ }
+ lno := setlineno(n)
+ defer func() { lineno = lno }()
+ return s.staticassign(n.Left, n.Right)
+}
+
+// staticcopy is like staticassign, but the value being assigned
+// is an already-initialized variable r.
+func (s *InitSchedule) staticcopy(l *Node, r *Node) bool {
+ if r.Op != ONAME {
+ return false
+ }
+ if r.Class() == PFUNC {
+ pfuncsym(l, r)
+ return true
+ }
+ if r.Class() != PEXTERN || r.Sym.Pkg != localpkg {
+ return false
+ }
+ if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ return false
+ }
+ if r.Name.Defn.Op != OAS {
+ return false
+ }
+ if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ return false
+ }
+ orig := r
+ r = r.Name.Defn.Right
+
+ for r.Op == OCONVNOP && !types.Identical(r.Type, l.Type) {
+ r = r.Left
+ }
+
+ switch r.Op {
+ case ONAME:
+ if s.staticcopy(l, r) {
+ return true
+ }
+ // We may have skipped past one or more OCONVNOPs, so
+ // use conv to ensure r is assignable to l (#13263).
+ s.append(nod(OAS, l, conv(r, l.Type)))
+ return true
+
+ case OLITERAL:
+ if isZero(r) {
+ return true
+ }
+ litsym(l, r, int(l.Type.Width))
+ return true
+
+ case OADDR:
+ if a := r.Left; a.Op == ONAME {
+ addrsym(l, a)
+ return true
+ }
+
+ case OPTRLIT:
+ switch r.Left.Op {
+ case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
+ // copy pointer
+ addrsym(l, s.inittemps[r])
+ return true
+ }
+
+ case OSLICELIT:
+ // copy slice
+ a := s.inittemps[r]
+ slicesym(l, a, r.Right.Int64Val())
+ return true
+
+ case OARRAYLIT, OSTRUCTLIT:
+ p := s.initplans[r]
+
+ n := l.copy()
+ for i := range p.E {
+ e := &p.E[i]
+ n.Xoffset = l.Xoffset + e.Xoffset
+ n.Type = e.Expr.Type
+ if e.Expr.Op == OLITERAL {
+ litsym(n, e.Expr, int(n.Type.Width))
+ continue
+ }
+ ll := n.sepcopy()
+ if s.staticcopy(ll, e.Expr) {
+ continue
+ }
+ // Requires computation, but we're
+ // copying someone else's computation.
+ rr := orig.sepcopy()
+ rr.Type = ll.Type
+ rr.Xoffset += e.Xoffset
+ setlineno(rr)
+ s.append(nod(OAS, ll, rr))
+ }
+
+ return true
+ }
+
+ return false
+}
+
+func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
+ for r.Op == OCONVNOP {
+ r = r.Left
+ }
+
+ switch r.Op {
+ case ONAME:
+ return s.staticcopy(l, r)
+
+ case OLITERAL:
+ if isZero(r) {
+ return true
+ }
+ litsym(l, r, int(l.Type.Width))
+ return true
+
+ case OADDR:
+ var nam Node
+ if stataddr(&nam, r.Left) {
+ addrsym(l, &nam)
+ return true
+ }
+ fallthrough
+
+ case OPTRLIT:
+ switch r.Left.Op {
+ case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
+ // Init pointer.
+ a := staticname(r.Left.Type)
+
+ s.inittemps[r] = a
+ addrsym(l, a)
+
+ // Init underlying literal.
+ if !s.staticassign(a, r.Left) {
+ s.append(nod(OAS, a, r.Left))
+ }
+ return true
+ }
+ //dump("not static ptrlit", r);
+
+ case OSTR2BYTES:
+ if l.Class() == PEXTERN && r.Left.Op == OLITERAL {
+ sval := r.Left.StringVal()
+ slicebytes(l, sval)
+ return true
+ }
+
+ case OSLICELIT:
+ s.initplan(r)
+ // Init slice.
+ bound := r.Right.Int64Val()
+ ta := types.NewArray(r.Type.Elem(), bound)
+ ta.SetNoalg(true)
+ a := staticname(ta)
+ s.inittemps[r] = a
+ slicesym(l, a, bound)
+ // Fall through to init underlying array.
+ l = a
+ fallthrough
+
+ case OARRAYLIT, OSTRUCTLIT:
+ s.initplan(r)
+
+ p := s.initplans[r]
+ n := l.copy()
+ for i := range p.E {
+ e := &p.E[i]
+ n.Xoffset = l.Xoffset + e.Xoffset
+ n.Type = e.Expr.Type
+ if e.Expr.Op == OLITERAL {
+ litsym(n, e.Expr, int(n.Type.Width))
+ continue
+ }
+ setlineno(e.Expr)
+ a := n.sepcopy()
+ if !s.staticassign(a, e.Expr) {
+ s.append(nod(OAS, a, e.Expr))
+ }
+ }
+
+ return true
+
+ case OMAPLIT:
+ break
+
+ case OCLOSURE:
+ if hasemptycvars(r) {
+ if Debug_closure > 0 {
+ Warnl(r.Pos, "closure converted to global")
+ }
+ // Closures with no captured variables are globals,
+ // so the assignment can be done at link time.
+ pfuncsym(l, r.Func.Closure.Func.Nname)
+ return true
+ }
+ closuredebugruntimecheck(r)
+
+ case OCONVIFACE:
+ // This logic is mirrored in isStaticCompositeLiteral.
+ // If you change something here, change it there, and vice versa.
+
+ // Determine the underlying concrete type and value we are converting from.
+ val := r
+ for val.Op == OCONVIFACE {
+ val = val.Left
+ }
+ if val.Type.IsInterface() {
+ // val is an interface type.
+ // If val is nil, we can statically initialize l;
+ // both words are zero, so there is no work to do; report success.
+ // If val is non-nil, we have no concrete type to record,
+ // and we won't be able to statically initialize its value, so report failure.
+ return Isconst(val, CTNIL)
+ }
+
+ markTypeUsedInInterface(val.Type, l.Sym.Linksym())
+
+ var itab *Node
+ if l.Type.IsEmptyInterface() {
+ itab = typename(val.Type)
+ } else {
+ itab = itabname(val.Type, l.Type)
+ }
+
+ // Create a copy of l to modify while we emit data.
+ n := l.copy()
+
+ // Emit itab, advance offset.
+ addrsym(n, itab.Left) // itab is an OADDR node
+ n.Xoffset += int64(Widthptr)
+
+ // Emit data.
+ if isdirectiface(val.Type) {
+ if Isconst(val, CTNIL) {
+ // Nil is zero, nothing to do.
+ return true
+ }
+ // Copy val directly into n.
+ n.Type = val.Type
+ setlineno(val)
+ a := n.sepcopy()
+ if !s.staticassign(a, val) {
+ s.append(nod(OAS, a, val))
+ }
+ } else {
+ // Construct temp to hold val, write pointer to temp into n.
+ a := staticname(val.Type)
+ s.inittemps[val] = a
+ if !s.staticassign(a, val) {
+ s.append(nod(OAS, a, val))
+ }
+ addrsym(n, a)
+ }
+
+ return true
+ }
+
+ //dump("not static", r);
+ return false
+}
+
+// initContext is the context in which static data is populated.
+// It is either in an init function or in any other function.
+// Static data populated in an init function will be written either
+// zero times (as a readonly, static data symbol) or
+// one time (during init function execution).
+// Either way, there is no opportunity for races or further modification,
+// so the data can be written to a (possibly readonly) data symbol.
+// Static data populated in any other function needs to be local to
+// that function to allow multiple instances of that function
+// to execute concurrently without clobbering each other's data.
+type initContext uint8
+
+const (
+ inInitFunction initContext = iota
+ inNonInitFunction
+)
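+
+// (Illustrative example, not part of the original change: a composite
+// literal built inside an ordinary function, such as
+//
+//	func f(x int) []int { return []int{1, 2, x} }
+//
+// may be executing in several goroutines at once, so its non-constant
+// element x cannot be written into a single shared static symbol; only
+// the constant part may live in static data. Package-level and init-time
+// initialization has no such concurrency, so it may populate (possibly
+// read-only) data symbols directly.)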
+
+func (c initContext) String() string {
+ if c == inInitFunction {
+ return "inInitFunction"
+ }
+ return "inNonInitFunction"
+}
+
+// from here down is the walk analysis
+// of composite literals.
+// most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// staticname returns a name backed by a (writable) static data symbol.
+// Use readonlystaticname for read-only nodes.
+func staticname(t *types.Type) *Node {
+ // Don't use lookupN; it interns the resulting string, but these are all unique.
+ n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ statuniqgen++
+ addvar(n, t, PEXTERN)
+ return n
+}
+
+// readonlystaticname returns a name backed by a read-only static data symbol.
+func readonlystaticname(t *types.Type) *Node {
+ n := staticname(t)
+ n.MarkReadonly()
+ n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Sym.Linksym().Set(obj.AttrLocal, true)
+ return n
+}
+
+func (n *Node) isSimpleName() bool {
+ return n.Op == ONAME && n.Class() != PAUTOHEAP && n.Class() != PEXTERN
+}
+
+func litas(l *Node, r *Node, init *Nodes) {
+ a := nod(OAS, l, r)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+}
+
+// initGenType is a bitmap indicating the types of generation that will occur for a static value.
+type initGenType uint8
+
+const (
+ initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
+ initConst // contains some constant values, which may be written into data symbols
+)
+
+// getdyn calculates the initGenType for n.
+// If top is false, getdyn is recursing.
+func getdyn(n *Node, top bool) initGenType {
+ switch n.Op {
+ default:
+ if n.isGoConst() {
+ return initConst
+ }
+ return initDynamic
+
+ case OSLICELIT:
+ if !top {
+ return initDynamic
+ }
+ if n.Right.Int64Val()/4 > int64(n.List.Len()) {
+ // <25% of entries have explicit values.
+ // Very rough estimation: it takes 4 bytes of instructions
+ // to initialize 1 byte of result. So don't use a static
+ // initializer if the dynamic initialization code would be
+ // smaller than the static value.
+ // See issue 23780.
+ return initDynamic
+ }
+
+ case OARRAYLIT, OSTRUCTLIT:
+ }
+
+ var mode initGenType
+ for _, n1 := range n.List.Slice() {
+ switch n1.Op {
+ case OKEY:
+ n1 = n1.Right
+ case OSTRUCTKEY:
+ n1 = n1.Left
+ }
+ mode |= getdyn(n1, false)
+ if mode == initDynamic|initConst {
+ break
+ }
+ }
+ return mode
+}
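+
+// (Illustrative example, not part of the original change: for a sparse
+// literal such as []byte{100: 'x'} the bound is 101 but only one entry is
+// explicit, so getdyn reports initDynamic and the slice is built with code
+// rather than from a mostly-zero static array; see issue 23780 above.)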
+
+// isStaticCompositeLiteral reports whether n is a compile-time constant.
+func isStaticCompositeLiteral(n *Node) bool {
+ switch n.Op {
+ case OSLICELIT:
+ return false
+ case OARRAYLIT:
+ for _, r := range n.List.Slice() {
+ if r.Op == OKEY {
+ r = r.Right
+ }
+ if !isStaticCompositeLiteral(r) {
+ return false
+ }
+ }
+ return true
+ case OSTRUCTLIT:
+ for _, r := range n.List.Slice() {
+ if r.Op != OSTRUCTKEY {
+ Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
+ }
+ if !isStaticCompositeLiteral(r.Left) {
+ return false
+ }
+ }
+ return true
+ case OLITERAL:
+ return true
+ case OCONVIFACE:
+ // See staticassign's OCONVIFACE case for comments.
+ val := n
+ for val.Op == OCONVIFACE {
+ val = val.Left
+ }
+ if val.Type.IsInterface() {
+ return Isconst(val, CTNIL)
+ }
+ if isdirectiface(val.Type) && Isconst(val, CTNIL) {
+ return true
+ }
+ return isStaticCompositeLiteral(val)
+ }
+ return false
+}
+
+// initKind is a kind of static initialization: static, dynamic, or local.
+// Static initialization represents literals and
+// literal components of composite literals.
+// Dynamic initialization represents non-literals and
+// non-literal components of composite literals.
+// LocalCode initialization represents initialization
+// that occurs purely in generated code local to the function of use.
+// Initialization code is sometimes generated in passes,
+// first static then dynamic.
+type initKind uint8
+
+const (
+ initKindStatic initKind = iota + 1
+ initKindDynamic
+ initKindLocalCode
+)
+
+// fixedlit handles struct, array, and slice literals.
+// TODO: expand documentation.
+func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
+ isBlank := var_ == nblank
+ var splitnode func(*Node) (a *Node, value *Node)
+ switch n.Op {
+ case OARRAYLIT, OSLICELIT:
+ var k int64
+ splitnode = func(r *Node) (*Node, *Node) {
+ if r.Op == OKEY {
+ k = indexconst(r.Left)
+ if k < 0 {
+ Fatalf("fixedlit: invalid index %v", r.Left)
+ }
+ r = r.Right
+ }
+ a := nod(OINDEX, var_, nodintconst(k))
+ k++
+ if isBlank {
+ a = nblank
+ }
+ return a, r
+ }
+ case OSTRUCTLIT:
+ splitnode = func(r *Node) (*Node, *Node) {
+ if r.Op != OSTRUCTKEY {
+ Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
+ }
+ if r.Sym.IsBlank() || isBlank {
+ return nblank, r.Left
+ }
+ setlineno(r)
+ return nodSym(ODOT, var_, r.Sym), r.Left
+ }
+ default:
+ Fatalf("fixedlit bad op: %v", n.Op)
+ }
+
+ for _, r := range n.List.Slice() {
+ a, value := splitnode(r)
+ if a == nblank && candiscard(value) {
+ continue
+ }
+
+ switch value.Op {
+ case OSLICELIT:
+ if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
+ slicelit(ctxt, value, a, init)
+ continue
+ }
+
+ case OARRAYLIT, OSTRUCTLIT:
+ fixedlit(ctxt, kind, value, a, init)
+ continue
+ }
+
+ islit := value.isGoConst()
+ if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
+ continue
+ }
+
+ // build list of assignments: var[index] = expr
+ setlineno(a)
+ a = nod(OAS, a, value)
+ a = typecheck(a, ctxStmt)
+ switch kind {
+ case initKindStatic:
+ genAsStatic(a)
+ case initKindDynamic, initKindLocalCode:
+ a = orderStmtInPlace(a, map[string][]*Node{})
+ a = walkstmt(a)
+ init.Append(a)
+ default:
+ Fatalf("fixedlit: bad kind %d", kind)
+ }
+
+ }
+}
+
+func isSmallSliceLit(n *Node) bool {
+ if n.Op != OSLICELIT {
+ return false
+ }
+
+ r := n.Right
+
+ return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
+}
+
+func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
+ // make an array type corresponding to the number of elements we have
+ t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ dowidth(t)
+
+ if ctxt == inNonInitFunction {
+ // put everything into static array
+ vstat := staticname(t)
+
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ fixedlit(ctxt, initKindDynamic, n, vstat, init)
+
+ // copy static to slice
+ var_ = typecheck(var_, ctxExpr|ctxAssign)
+ var nam Node
+ if !stataddr(&nam, var_) || nam.Class() != PEXTERN {
+ Fatalf("slicelit: %v", var_)
+ }
+ slicesym(&nam, vstat, t.NumElem())
+ return
+ }
+
+ // recipe for var = []t{...}
+ // 1. make a static array
+ // var vstat [...]t
+ // 2. assign (data statements) the constant part
+ // vstat = constpart{}
+ // 3. make an auto pointer to array and allocate heap to it
+ // var vauto *[...]t = new([...]t)
+ // 4. copy the static array to the auto array
+ // *vauto = vstat
+ // 5. for each dynamic part assign to the array
+ // vauto[i] = dynamic part
+ // 6. assign slice of allocated heap to var
+ // var = vauto[:]
+ //
+ // an optimization is done if there is no constant part
+ // 3. var vauto *[...]t = new([...]t)
+ // 5. vauto[i] = dynamic part
+ // 6. var = vauto[:]
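+ //
+ // (Illustrative instance, not part of the original change: for
+ // var s = []int{1, 2, x} with x not constant, the full recipe is roughly
+ //	vstat := [3]int{1, 2, 0}	// steps 1-2, constant part in static data
+ //	vauto := new([3]int)	// step 3
+ //	*vauto = vstat	// step 4
+ //	vauto[2] = x	// step 5
+ //	s = vauto[:]	// step 6
+ // ignoring the small-literal case below, where the static array is skipped.)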
+
+ // if the literal contains constants,
+ // make static initialized array (1),(2)
+ var vstat *Node
+
+ mode := getdyn(n, true)
+ if mode&initConst != 0 && !isSmallSliceLit(n) {
+ if ctxt == inInitFunction {
+ vstat = readonlystaticname(t)
+ } else {
+ vstat = staticname(t)
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ }
+
+ // make new auto *array (3 declare)
+ vauto := temp(types.NewPtr(t))
+
+ // set auto to point at new temp or heap (3 assign)
+ var a *Node
+ if x := prealloc[n]; x != nil {
+ // temp allocated during order.go for dddarg
+ if !types.Identical(t, x.Type) {
+ panic("dotdotdot base type does not match order's assigned type")
+ }
+
+ if vstat == nil {
+ a = nod(OAS, x, nil)
+ a = typecheck(a, ctxStmt)
+ init.Append(a) // zero new temp
+ } else {
+ // Declare that we're about to initialize all of x.
+ // (Which happens at the *vauto = vstat below.)
+ init.Append(nod(OVARDEF, x, nil))
+ }
+
+ a = nod(OADDR, x, nil)
+ } else if n.Esc == EscNone {
+ a = temp(t)
+ if vstat == nil {
+ a = nod(OAS, temp(t), nil)
+ a = typecheck(a, ctxStmt)
+ init.Append(a) // zero new temp
+ a = a.Left
+ } else {
+ init.Append(nod(OVARDEF, a, nil))
+ }
+
+ a = nod(OADDR, a, nil)
+ } else {
+ a = nod(ONEW, nil, nil)
+ a.List.Set1(typenod(t))
+ }
+
+ a = nod(OAS, vauto, a)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+
+ if vstat != nil {
+ // copy static to heap (4)
+ a = nod(ODEREF, vauto, nil)
+
+ a = nod(OAS, a, vstat)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+ }
+
+ // put dynamics into array (5)
+ var index int64
+ for _, value := range n.List.Slice() {
+ if value.Op == OKEY {
+ index = indexconst(value.Left)
+ if index < 0 {
+ Fatalf("slicelit: invalid index %v", value.Left)
+ }
+ value = value.Right
+ }
+ a := nod(OINDEX, vauto, nodintconst(index))
+ a.SetBounded(true)
+ index++
+
+ // TODO need to check bounds?
+
+ switch value.Op {
+ case OSLICELIT:
+ break
+
+ case OARRAYLIT, OSTRUCTLIT:
+ k := initKindDynamic
+ if vstat == nil {
+ // Generate both static and dynamic initializations.
+ // See issue #31987.
+ k = initKindLocalCode
+ }
+ fixedlit(ctxt, k, value, a, init)
+ continue
+ }
+
+ if vstat != nil && value.isGoConst() { // already set by copy from static value
+ continue
+ }
+
+ // build list of vauto[c] = expr
+ setlineno(value)
+ a = nod(OAS, a, value)
+
+ a = typecheck(a, ctxStmt)
+ a = orderStmtInPlace(a, map[string][]*Node{})
+ a = walkstmt(a)
+ init.Append(a)
+ }
+
+ // make slice out of heap (6)
+ a = nod(OAS, var_, nod(OSLICE, vauto, nil))
+
+ a = typecheck(a, ctxStmt)
+ a = orderStmtInPlace(a, map[string][]*Node{})
+ a = walkstmt(a)
+ init.Append(a)
+}
+
+func maplit(n *Node, m *Node, init *Nodes) {
+ // make the map var
+ a := nod(OMAKE, nil, nil)
+ a.Esc = n.Esc
+ a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
+ litas(m, a, init)
+
+ entries := n.List.Slice()
+
+ // The order pass already removed any dynamic (runtime-computed) entries.
+ // All remaining entries are static. Double-check that.
+ for _, r := range entries {
+ if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ Fatalf("maplit: entry is not a literal: %v", r)
+ }
+ }
+
+ if len(entries) > 25 {
+ // For a large number of entries, put them in an array and loop.
+
+ // build types [count]Tindex and [count]Tvalue
+ tk := types.NewArray(n.Type.Key(), int64(len(entries)))
+ te := types.NewArray(n.Type.Elem(), int64(len(entries)))
+
+ tk.SetNoalg(true)
+ te.SetNoalg(true)
+
+ dowidth(tk)
+ dowidth(te)
+
+ // make and initialize static arrays
+ vstatk := readonlystaticname(tk)
+ vstate := readonlystaticname(te)
+
+ datak := nod(OARRAYLIT, nil, nil)
+ datae := nod(OARRAYLIT, nil, nil)
+ for _, r := range entries {
+ datak.List.Append(r.Left)
+ datae.List.Append(r.Right)
+ }
+ fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+ fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
+
+ // loop adding structure elements to map
+ // for i = 0; i < len(vstatk); i++ {
+ // map[vstatk[i]] = vstate[i]
+ // }
+ i := temp(types.Types[TINT])
+ rhs := nod(OINDEX, vstate, i)
+ rhs.SetBounded(true)
+
+ kidx := nod(OINDEX, vstatk, i)
+ kidx.SetBounded(true)
+ lhs := nod(OINDEX, m, kidx)
+
+ zero := nod(OAS, i, nodintconst(0))
+ cond := nod(OLT, i, nodintconst(tk.NumElem()))
+ incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
+ body := nod(OAS, lhs, rhs)
+
+ loop := nod(OFOR, cond, incr)
+ loop.Nbody.Set1(body)
+ loop.Ninit.Set1(zero)
+
+ loop = typecheck(loop, ctxStmt)
+ loop = walkstmt(loop)
+ init.Append(loop)
+ return
+ }
+ // For a small number of entries, just add them directly.
+
+ // Build list of var[c] = expr.
+ // Use temporaries so that mapassign1 can have addressable key, elem.
+ // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
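+ // (Illustrative example, not part of the original change: for
+ // m := map[string]int{"a": 1, "b": 2} this path emits roughly
+ //	tmpkey = "a"; tmpelem = 1; m[tmpkey] = tmpelem
+ //	tmpkey = "b"; tmpelem = 2; m[tmpkey] = tmpelem
+ // followed by VARKILLs of the two temporaries.)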
+ tmpkey := temp(m.Type.Key())
+ tmpelem := temp(m.Type.Elem())
+
+ for _, r := range entries {
+ index, elem := r.Left, r.Right
+
+ setlineno(index)
+ a := nod(OAS, tmpkey, index)
+ a = typecheck(a, ctxStmt)
+ a = walkstmt(a)
+ init.Append(a)
+
+ setlineno(elem)
+ a = nod(OAS, tmpelem, elem)
+ a = typecheck(a, ctxStmt)
+ a = walkstmt(a)
+ init.Append(a)
+
+ setlineno(tmpelem)
+ a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
+ a = typecheck(a, ctxStmt)
+ a = walkstmt(a)
+ init.Append(a)
+ }
+
+ a = nod(OVARKILL, tmpkey, nil)
+ a = typecheck(a, ctxStmt)
+ init.Append(a)
+ a = nod(OVARKILL, tmpelem, nil)
+ a = typecheck(a, ctxStmt)
+ init.Append(a)
+}
+
+func anylit(n *Node, var_ *Node, init *Nodes) {
+ t := n.Type
+ switch n.Op {
+ default:
+ Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
+
+ case ONAME:
+ a := nod(OAS, var_, n)
+ a = typecheck(a, ctxStmt)
+ init.Append(a)
+
+ case OPTRLIT:
+ if !t.IsPtr() {
+ Fatalf("anylit: not ptr")
+ }
+
+ var r *Node
+ if n.Right != nil {
+ // n.Right is stack temporary used as backing store.
+ init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
+ r = nod(OADDR, n.Right, nil)
+ r = typecheck(r, ctxExpr)
+ } else {
+ r = nod(ONEW, nil, nil)
+ r.SetTypecheck(1)
+ r.Type = t
+ r.Esc = n.Esc
+ }
+
+ r = walkexpr(r, init)
+ a := nod(OAS, var_, r)
+
+ a = typecheck(a, ctxStmt)
+ init.Append(a)
+
+ var_ = nod(ODEREF, var_, nil)
+ var_ = typecheck(var_, ctxExpr|ctxAssign)
+ anylit(n.Left, var_, init)
+
+ case OSTRUCTLIT, OARRAYLIT:
+ if !t.IsStruct() && !t.IsArray() {
+ Fatalf("anylit: not struct/array")
+ }
+
+ if var_.isSimpleName() && n.List.Len() > 4 {
+ // lay out static data
+ vstat := readonlystaticname(t)
+
+ ctxt := inInitFunction
+ if n.Op == OARRAYLIT {
+ ctxt = inNonInitFunction
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+
+ // copy static to var
+ a := nod(OAS, var_, vstat)
+
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+
+ // add expressions to automatic
+ fixedlit(inInitFunction, initKindDynamic, n, var_, init)
+ break
+ }
+
+ var components int64
+ if n.Op == OARRAYLIT {
+ components = t.NumElem()
+ } else {
+ components = int64(t.NumFields())
+ }
+ // initialization of an array or struct with unspecified components (missing fields or array elements)
+ if var_.isSimpleName() || int64(n.List.Len()) < components {
+ a := nod(OAS, var_, nil)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+ }
+
+ fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
+
+ case OSLICELIT:
+ slicelit(inInitFunction, n, var_, init)
+
+ case OMAPLIT:
+ if !t.IsMap() {
+ Fatalf("anylit: not map")
+ }
+ maplit(n, var_, init)
+ }
+}
+
+func oaslit(n *Node, init *Nodes) bool {
+ if n.Left == nil || n.Right == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if n.Left.Type == nil || n.Right.Type == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if !n.Left.isSimpleName() {
+ // not a special composite literal assignment
+ return false
+ }
+ if !types.Identical(n.Left.Type, n.Right.Type) {
+ // not a special composite literal assignment
+ return false
+ }
+
+ switch n.Right.Op {
+ default:
+ // not a special composite literal assignment
+ return false
+
+ case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+ if vmatch1(n.Left, n.Right) {
+ // not a special composite literal assignment
+ return false
+ }
+ anylit(n.Right, n.Left, init)
+ }
+
+ n.Op = OEMPTY
+ n.Right = nil
+ return true
+}
+
+func getlit(lit *Node) int {
+ if smallintconst(lit) {
+ return int(lit.Int64Val())
+ }
+ return -1
+}
+
+// stataddr sets nam to the static address of n and reports whether it succeeded.
+func stataddr(nam *Node, n *Node) bool {
+ if n == nil {
+ return false
+ }
+
+ switch n.Op {
+ case ONAME:
+ *nam = *n
+ return true
+
+ case ODOT:
+ if !stataddr(nam, n.Left) {
+ break
+ }
+ nam.Xoffset += n.Xoffset
+ nam.Type = n.Type
+ return true
+
+ case OINDEX:
+ if n.Left.Type.IsSlice() {
+ break
+ }
+ if !stataddr(nam, n.Left) {
+ break
+ }
+ l := getlit(n.Right)
+ if l < 0 {
+ break
+ }
+
+ // Check for overflow.
+ if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+ break
+ }
+ nam.Xoffset += int64(l) * n.Type.Width
+ nam.Type = n.Type
+ return true
+ }
+
+ return false
+}
+
+func (s *InitSchedule) initplan(n *Node) {
+ if s.initplans[n] != nil {
+ return
+ }
+ p := new(InitPlan)
+ s.initplans[n] = p
+ switch n.Op {
+ default:
+ Fatalf("initplan")
+
+ case OARRAYLIT, OSLICELIT:
+ var k int64
+ for _, a := range n.List.Slice() {
+ if a.Op == OKEY {
+ k = indexconst(a.Left)
+ if k < 0 {
+ Fatalf("initplan arraylit: invalid index %v", a.Left)
+ }
+ a = a.Right
+ }
+ s.addvalue(p, k*n.Type.Elem().Width, a)
+ k++
+ }
+
+ case OSTRUCTLIT:
+ for _, a := range n.List.Slice() {
+ if a.Op != OSTRUCTKEY {
+ Fatalf("initplan structlit")
+ }
+ if a.Sym.IsBlank() {
+ continue
+ }
+ s.addvalue(p, a.Xoffset, a.Left)
+ }
+
+ case OMAPLIT:
+ for _, a := range n.List.Slice() {
+ if a.Op != OKEY {
+ Fatalf("initplan maplit")
+ }
+ s.addvalue(p, -1, a.Right)
+ }
+ }
+}
+
+func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *Node) {
+ // special case: zero can be dropped entirely
+ if isZero(n) {
+ return
+ }
+
+ // special case: inline struct and array (not slice) literals
+ if isvaluelit(n) {
+ s.initplan(n)
+ q := s.initplans[n]
+ for _, qe := range q.E {
+ // qe is a copy; we are not modifying entries in q.E
+ qe.Xoffset += xoffset
+ p.E = append(p.E, qe)
+ }
+ return
+ }
+
+ // add to plan
+ p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
+}
+
+func isZero(n *Node) bool {
+ switch n.Op {
+ case OLITERAL:
+ switch u := n.Val().U.(type) {
+ default:
+ Dump("unexpected literal", n)
+ Fatalf("isZero")
+ case *NilVal:
+ return true
+ case string:
+ return u == ""
+ case bool:
+ return !u
+ case *Mpint:
+ return u.CmpInt64(0) == 0
+ case *Mpflt:
+ return u.CmpFloat64(0) == 0
+ case *Mpcplx:
+ return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0
+ }
+
+ case OARRAYLIT:
+ for _, n1 := range n.List.Slice() {
+ if n1.Op == OKEY {
+ n1 = n1.Right
+ }
+ if !isZero(n1) {
+ return false
+ }
+ }
+ return true
+
+ case OSTRUCTLIT:
+ for _, n1 := range n.List.Slice() {
+ if !isZero(n1.Left) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+func isvaluelit(n *Node) bool {
+ return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
+}
+
+func genAsStatic(as *Node) {
+ if as.Left.Type == nil {
+ Fatalf("genAsStatic as.Left not typechecked")
+ }
+
+ var nam Node
+ if !stataddr(&nam, as.Left) || (nam.Class() != PEXTERN && as.Left != nblank) {
+ Fatalf("genAsStatic: lhs %v", as.Left)
+ }
+
+ switch {
+ case as.Right.Op == OLITERAL:
+ litsym(&nam, as.Right, int(as.Right.Type.Width))
+ case as.Right.Op == ONAME && as.Right.Class() == PFUNC:
+ pfuncsym(&nam, as.Right)
+ default:
+ Fatalf("genAsStatic: rhs %v", as.Right)
+ }
+}
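A quick illustration of what isZero and addvalue accomplish, using ordinary Go literals rather than compiler internals (the variable names here are invented for the example): composite literals whose every element or field is the zero value contribute no plan entries at all, and a literal with one nonzero field contributes exactly one entry.

	package main

	import "fmt"

	// Both a and b are "zero" in isZero's sense: every element/field is the
	// type's zero value, so the init planner would record no entries for them.
	var a = [2]int{0, 0}
	var b = struct {
		X int
		S string
	}{}

	// c has one nonzero field, so only X (at offset 0) would need a plan entry.
	var c = struct{ X, Y int }{X: 7}

	func main() { fmt.Println(a, b, c) }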
diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go
new file mode 100644
index 0000000..ce4a216
--- /dev/null
+++ b/src/cmd/compile/internal/gc/sizeof_test.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Func{}, 124, 224},
+ {Name{}, 32, 56},
+ {Param{}, 24, 48},
+ {Node{}, 76, 128},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
new file mode 100644
index 0000000..5b74754
--- /dev/null
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -0,0 +1,7231 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "html"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+var ssaConfig *ssa.Config
+var ssaCaches []ssa.Cache
+
+var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string // optional destination for ssa dump file
+var ssaDumpStdout bool // whether to dump to stdout
+var ssaDumpCFG string // generate CFGs for these phases
+const ssaDumpFile = "ssa.html"
+
+// The max number of defers in a function using open-coded defers. We enforce this
+// limit because the deferBits bitmask is currently a single byte (to minimize code size)
+const maxOpenDefers = 8
+
+// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
+var ssaDumpInlined []*Node
+
+func initssaconfig() {
+ types_ := ssa.NewTypes()
+
+ if thearch.SoftFloat {
+ softfloatInit()
+ }
+
+ // Generate a few pointer types that are uncommon in the frontend but common in the backend.
+ // Caching is disabled in the backend, so generating these here avoids allocations.
+ _ = types.NewPtr(types.Types[TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[TINT16]) // *int16
+ _ = types.NewPtr(types.Types[TINT64]) // *int64
+ _ = types.NewPtr(types.Errortype) // *error
+ types.NewPtrCacheEnabled = false
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug.N == 0)
+ ssaConfig.SoftFloat = thearch.SoftFloat
+ ssaConfig.Race = flag_race
+ ssaCaches = make([]ssa.Cache, nBackendWorkers)
+
+ // Set up some runtime functions we'll need to call.
+ assertE2I = sysfunc("assertE2I")
+ assertE2I2 = sysfunc("assertE2I2")
+ assertI2I = sysfunc("assertI2I")
+ assertI2I2 = sysfunc("assertI2I2")
+ deferproc = sysfunc("deferproc")
+ deferprocStack = sysfunc("deferprocStack")
+ Deferreturn = sysfunc("deferreturn")
+ Duffcopy = sysfunc("duffcopy")
+ Duffzero = sysfunc("duffzero")
+ gcWriteBarrier = sysfunc("gcWriteBarrier")
+ goschedguarded = sysfunc("goschedguarded")
+ growslice = sysfunc("growslice")
+ msanread = sysfunc("msanread")
+ msanwrite = sysfunc("msanwrite")
+ msanmove = sysfunc("msanmove")
+ newobject = sysfunc("newobject")
+ newproc = sysfunc("newproc")
+ panicdivide = sysfunc("panicdivide")
+ panicdottypeE = sysfunc("panicdottypeE")
+ panicdottypeI = sysfunc("panicdottypeI")
+ panicnildottype = sysfunc("panicnildottype")
+ panicoverflow = sysfunc("panicoverflow")
+ panicshift = sysfunc("panicshift")
+ raceread = sysfunc("raceread")
+ racereadrange = sysfunc("racereadrange")
+ racewrite = sysfunc("racewrite")
+ racewriterange = sysfunc("racewriterange")
+ x86HasPOPCNT = sysvar("x86HasPOPCNT") // bool
+ x86HasSSE41 = sysvar("x86HasSSE41") // bool
+ x86HasFMA = sysvar("x86HasFMA") // bool
+ armHasVFPv4 = sysvar("armHasVFPv4") // bool
+ arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool
+ typedmemclr = sysfunc("typedmemclr")
+ typedmemmove = sysfunc("typedmemmove")
+ Udiv = sysvar("udiv") // asm func with special ABI
+ writeBarrier = sysvar("writeBarrier") // struct { bool; ... }
+ zerobaseSym = sysvar("zerobase")
+
+ // asm funcs with special ABI
+ if thearch.LinkArch.Name == "amd64" {
+ GCWriteBarrierReg = map[int16]*obj.LSym{
+ x86.REG_AX: sysfunc("gcWriteBarrier"),
+ x86.REG_CX: sysfunc("gcWriteBarrierCX"),
+ x86.REG_DX: sysfunc("gcWriteBarrierDX"),
+ x86.REG_BX: sysfunc("gcWriteBarrierBX"),
+ x86.REG_BP: sysfunc("gcWriteBarrierBP"),
+ x86.REG_SI: sysfunc("gcWriteBarrierSI"),
+ x86.REG_R8: sysfunc("gcWriteBarrierR8"),
+ x86.REG_R9: sysfunc("gcWriteBarrierR9"),
+ }
+ }
+
+ if thearch.LinkArch.Family == sys.Wasm {
+ BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("goPanicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("goPanicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("goPanicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("goPanicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("goPanicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("goPanicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("goPanicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("goPanicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("goPanicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("goPanicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("goPanicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("goPanicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("goPanicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("goPanicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("goPanicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("goPanicSlice3CU")
+ } else {
+ BoundsCheckFunc[ssa.BoundsIndex] = sysfunc("panicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = sysfunc("panicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = sysfunc("panicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysfunc("panicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = sysfunc("panicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysfunc("panicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = sysfunc("panicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = sysfunc("panicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysfunc("panicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysfunc("panicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysfunc("panicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysfunc("panicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = sysfunc("panicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = sysfunc("panicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = sysfunc("panicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = sysfunc("panicSlice3CU")
+ }
+ if thearch.LinkArch.PtrSize == 4 {
+ ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
+ ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU")
+ ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen")
+ ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU")
+ ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap")
+ ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU")
+ ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB")
+ ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU")
+ ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen")
+ ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU")
+ ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap")
+ ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU")
+ ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B")
+ ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU")
+ ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C")
+ ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
+ }
+
+ // Wasm (all asm funcs with special ABIs)
+ WasmMove = sysvar("wasmMove")
+ WasmZero = sysvar("wasmZero")
+ WasmDiv = sysvar("wasmDiv")
+ WasmTruncS = sysvar("wasmTruncS")
+ WasmTruncU = sysvar("wasmTruncU")
+ SigPanic = sysfunc("sigpanic")
+}
+
+// getParam returns the Field of the ith param of node n (which is a
+// function/method/interface call), where the receiver of a method call is
+// considered as the 0th parameter. This does not include the receiver of an
+// interface call.
+func getParam(n *Node, i int) *types.Field {
+ t := n.Left.Type
+ if n.Op == OCALLMETH {
+ if i == 0 {
+ return t.Recv()
+ }
+ return t.Params().Field(i - 1)
+ }
+ return t.Params().Field(i)
+}
+
+// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
+func dvarint(x *obj.LSym, off int, v int64) int {
+ if v < 0 || v > 1e9 {
+ panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
+ }
+ if v < 1<<7 {
+ return duint8(x, off, uint8(v))
+ }
+ off = duint8(x, off, uint8((v&127)|128))
+ if v < 1<<14 {
+ return duint8(x, off, uint8(v>>7))
+ }
+ off = duint8(x, off, uint8(((v>>7)&127)|128))
+ if v < 1<<21 {
+ return duint8(x, off, uint8(v>>14))
+ }
+ off = duint8(x, off, uint8(((v>>14)&127)|128))
+ if v < 1<<28 {
+ return duint8(x, off, uint8(v>>21))
+ }
+ off = duint8(x, off, uint8(((v>>21)&127)|128))
+ return duint8(x, off, uint8(v>>28))
+}
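dvarint emits an unsigned base-128 (LEB128-style) varint: seven value bits per byte, least-significant group first, with the high bit of each byte marking continuation. A self-contained sketch of the same encoding, without the obj.LSym plumbing, plus two worked values:

	package main

	import "fmt"

	// appendVarint encodes v the same way dvarint does, but into a byte slice.
	func appendVarint(buf []byte, v int64) []byte {
		for v >= 1<<7 {
			buf = append(buf, byte(v&127|128)) // low 7 bits, continuation bit set
			v >>= 7
		}
		return append(buf, byte(v)) // final group, continuation bit clear
	}

	func main() {
		fmt.Printf("% x\n", appendVarint(nil, 8))   // 08
		fmt.Printf("% x\n", appendVarint(nil, 300)) // ac 02
	}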
+
+// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
+// that is using open-coded defers. This funcdata is used to determine the active
+// defers in a function and execute those defers during panic processing.
+//
+// The funcdata is all encoded in varints (since values will almost always be less than
+// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
+// for stack variables are specified as the number of bytes below varp (pointer to the
+// top of the local variables) for their starting address. The format is:
+//
+// - Max total argument size among all the defers
+// - Offset of the deferBits variable
+// - Number of defers in the function
+// - Information about each defer call, in reverse order of appearance in the function:
+// - Total argument size of the call
+// - Offset of the closure value to call
+// - Number of arguments (including interface receiver or method receiver as first arg)
+// - Information about each argument
+// - Offset of the stored defer argument in this function's frame
+// - Size of the argument
+// - Offset of where argument should be placed in the args frame when making call
+func (s *state) emitOpenDeferInfo() {
+ x := Ctxt.Lookup(s.curfn.Func.lsym.Name + ".opendefer")
+ s.curfn.Func.lsym.Func().OpenCodedDeferInfo = x
+ off := 0
+
+ // Compute maxargsize (max size of arguments for all defers)
+ // first, so we can output it first to the funcdata
+ var maxargsize int64
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ argsize := r.n.Left.Type.ArgWidth()
+ if argsize > maxargsize {
+ maxargsize = argsize
+ }
+ }
+ off = dvarint(x, off, maxargsize)
+ off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
+ off = dvarint(x, off, int64(len(s.openDefers)))
+
+ // Write in reverse-order, for ease of running in that order at runtime
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ off = dvarint(x, off, r.n.Left.Type.ArgWidth())
+ off = dvarint(x, off, -r.closureNode.Xoffset)
+ numArgs := len(r.argNodes)
+ if r.rcvrNode != nil {
+ // If there's an interface receiver, treat/place it as the first
+ // arg. (If there is a method receiver, it's already included as
+ // first arg in r.argNodes.)
+ numArgs++
+ }
+ off = dvarint(x, off, int64(numArgs))
+ if r.rcvrNode != nil {
+ off = dvarint(x, off, -r.rcvrNode.Xoffset)
+ off = dvarint(x, off, s.config.PtrSize)
+ off = dvarint(x, off, 0)
+ }
+ for j, arg := range r.argNodes {
+ f := getParam(r.n, j)
+ off = dvarint(x, off, -arg.Xoffset)
+ off = dvarint(x, off, f.Type.Size())
+ off = dvarint(x, off, f.Offset)
+ }
+ }
+}
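To make the layout above concrete, here is a hypothetical stream for a function with a single open-coded defer of f(x), where x is 8 bytes; all offsets are invented for the example, and every value is below 128, so each occupies one varint byte:

	// maxargsize                   = 8
	// deferBits offset below varp  = 1
	// number of defers             = 1
	//   defer #0: argsize = 8, closure offset = 16, number of args = 1
	//     arg #0: stored-arg offset = 24, size = 8, offset in call frame = 0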
+
+// buildssa builds an SSA function for fn.
+// worker indicates which of the backend workers is doing the processing.
+func buildssa(fn *Node, worker int) *ssa.Func {
+ name := fn.funcname()
+ printssa := false
+ if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
+ printssa = name == ssaDump || myimportpath+"."+name == ssaDump
+ }
+ var astBuf *bytes.Buffer
+ if printssa {
+ astBuf = &bytes.Buffer{}
+ fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
+ fdumplist(astBuf, "buildssa-body", fn.Nbody)
+ fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
+ if ssaDumpStdout {
+ fmt.Println("generating SSA for", name)
+ fmt.Print(astBuf.String())
+ }
+ }
+
+ var s state
+ s.pushLine(fn.Pos)
+ defer s.popLine()
+
+ s.hasdefer = fn.Func.HasDefer()
+ if fn.Func.Pragma&CgoUnsafeArgs != 0 {
+ s.cgoUnsafeArgs = true
+ }
+
+ fe := ssafn{
+ curfn: fn,
+ log: printssa && ssaDumpStdout,
+ }
+ s.curfn = fn
+
+ s.f = ssa.NewFunc(&fe)
+ s.config = ssaConfig
+ s.f.Type = fn.Type
+ s.f.Config = ssaConfig
+ s.f.Cache = &ssaCaches[worker]
+ s.f.Cache.Reset()
+ s.f.Name = name
+ s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
+ s.f.PrintOrHtmlSSA = printssa
+ if fn.Func.Pragma&Nosplit != 0 {
+ s.f.NoSplit = true
+ }
+ s.panics = map[funcLine]*ssa.Block{}
+ s.softFloat = s.config.SoftFloat
+
+ // Allocate starting block
+ s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
+ s.f.Entry.Pos = fn.Pos
+
+ if printssa {
+ ssaDF := ssaDumpFile
+ if ssaDir != "" {
+ ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html")
+ ssaD := filepath.Dir(ssaDF)
+ os.MkdirAll(ssaD, 0755)
+ }
+ s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
+ // TODO: generate and print a mapping from nodes to values and blocks
+ dumpSourcesColumn(s.f.HTMLWriter, fn)
+ s.f.HTMLWriter.WriteAST("AST", astBuf)
+ }
+
+ // Allocate starting values
+ s.labels = map[string]*ssaLabel{}
+ s.labeledNodes = map[*Node]*ssaLabel{}
+ s.fwdVars = map[*Node]*ssa.Value{}
+ s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
+
+ s.hasOpenDefers = Debug.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ switch {
+ case s.hasOpenDefers && (Ctxt.Flag_shared || Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+		// On 386, don't support open-coded defers only when using shared
+		// libraries, because there is extra code (added by rewriteToUseGot())
+		// preceding the deferreturn/ret code generated by gencallret()
+		// that we don't track correctly.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
+ // Skip doing open defers if there is any extra exit code (likely
+ // copying heap-allocated return values or race detection), since
+ // we will not generate that code in the case of the extra
+ // deferreturn/ret segment.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers &&
+ s.curfn.Func.numReturns*s.curfn.Func.numDefers > 15 {
+ // Since we are generating defer calls at every exit for
+ // open-coded defers, skip doing open-coded defers if there are
+ // too many returns (especially if there are multiple defers).
+ // Open-coded defers are most important for improving performance
+ // for smaller functions (which don't have many returns).
+ s.hasOpenDefers = false
+ }
+
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
+
+ s.startBlock(s.f.Entry)
+ s.vars[&memVar] = s.startmem
+ if s.hasOpenDefers {
+ // Create the deferBits variable and stack slot. deferBits is a
+ // bitmask showing which of the open-coded defers in this function
+ // have been activated.
+ deferBitsTemp := tempAt(src.NoXPos, s.curfn, types.Types[TUINT8])
+ s.deferBitsTemp = deferBitsTemp
+ // For this value, AuxInt is initialized to zero by default
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[TUINT8])
+ s.vars[&deferBitsVar] = startDeferBits
+ s.deferBitsAddr = s.addr(deferBitsTemp)
+ s.store(types.Types[TUINT8], s.deferBitsAddr, startDeferBits)
+ // Make sure that the deferBits stack slot is kept alive (for use
+ // by panics) and stores to deferBits are not eliminated, even if
+ // all checking code on deferBits in the function exit can be
+ // eliminated, because the defer statements were all
+ // unconditional.
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ }
+
+ // Generate addresses of local declarations
+ s.decladdrs = map[*Node]*ssa.Value{}
+ var args []ssa.Param
+ var results []ssa.Param
+ for _, n := range fn.Func.Dcl {
+ switch n.Class() {
+ case PPARAM:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
+ args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ case PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
+ results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ if s.canSSA(n) {
+ // Save ssa-able PPARAMOUT variables so we can
+ // store them back to the stack at the end of
+ // the function.
+ s.returns = append(s.returns, n)
+ }
+ case PAUTO:
+ // processed at each use, to prevent Addr coming
+ // before the decl.
+ case PAUTOHEAP:
+ // moved to heap - already handled by frontend
+ case PFUNC:
+ // local function - already handled by frontend
+ default:
+ s.Fatalf("local variable with class %v unimplemented", n.Class())
+ }
+ }
+
+ // Populate SSAable arguments.
+ for _, n := range fn.Func.Dcl {
+ if n.Class() == PPARAM && s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type, n)
+ s.vars[n] = v
+ s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
+ }
+ }
+
+ // Convert the AST-based IR to the SSA-based IR
+ s.stmtList(fn.Func.Enter)
+ s.stmtList(fn.Nbody)
+
+ // fallthrough to exit
+ if s.curBlock != nil {
+ s.pushLine(fn.Func.Endlineno)
+ s.exit()
+ s.popLine()
+ }
+
+ for _, b := range s.f.Blocks {
+ if b.Pos != src.NoXPos {
+ s.updateUnsetPredPos(b)
+ }
+ }
+
+ s.insertPhis()
+
+ // Main call to ssa package to compile function
+ ssa.Compile(s.f)
+
+ if s.hasOpenDefers {
+ s.emitOpenDeferInfo()
+ }
+
+ return s.f
+}
+
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
+ // Read sources of target function fn.
+ fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for function %v: %v", fn, err)
+ }
+
+ // Read sources of inlined functions.
+ var inlFns []*ssa.FuncLines
+ for _, fi := range ssaDumpInlined {
+ var elno src.XPos
+ if fi.Name.Defn == nil {
+ // Endlineno is filled from exported data.
+ elno = fi.Func.Endlineno
+ } else {
+ elno = fi.Name.Defn.Func.Endlineno
+ }
+ fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
+ continue
+ }
+ inlFns = append(inlFns, fnLines)
+ }
+
+ sort.Sort(ssa.ByTopo(inlFns))
+ if targetFn != nil {
+ inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
+ }
+
+ writer.WriteSources("sources", inlFns)
+}
+
+func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
+ f, err := os.Open(os.ExpandEnv(file))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ var lines []string
+ ln := uint(1)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() && ln <= end {
+ if ln >= start {
+ lines = append(lines, scanner.Text())
+ }
+ ln++
+ }
+ return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
+}
+
+// updateUnsetPredPos propagates the earliest-value position information for b
+// towards all of b's predecessors that need a position, and recurses on that
+// predecessor if its position is updated. B should have a non-empty position.
+func (s *state) updateUnsetPredPos(b *ssa.Block) {
+ if b.Pos == src.NoXPos {
+ s.Fatalf("Block %s should have a position", b)
+ }
+ bestPos := src.NoXPos
+ for _, e := range b.Preds {
+ p := e.Block()
+ if !p.LackingPos() {
+ continue
+ }
+ if bestPos == src.NoXPos {
+ bestPos = b.Pos
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ if v.Pos != src.NoXPos {
+ // Assume values are still in roughly textual order;
+ // TODO: could also seek minimum position?
+ bestPos = v.Pos
+ break
+ }
+ }
+ }
+ p.Pos = bestPos
+ s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
+ }
+}
+
+// Information about each open-coded defer.
+type openDeferInfo struct {
+ // The ODEFER node representing the function call of the defer
+ n *Node
+	// If the deferred call is a closure call, the address of the argtmp where the
+	// closure is stored.
+ closure *ssa.Value
+ // The node representing the argtmp where the closure is stored - used for
+ // function, method, or interface call, to store a closure that panic
+ // processing can use for this defer.
+ closureNode *Node
+	// If the deferred call is an interface call, the address of the argtmp where the
+	// receiver is stored.
+ rcvr *ssa.Value
+ // The node representing the argtmp where the receiver is stored
+ rcvrNode *Node
+ // The addresses of the argtmps where the evaluated arguments of the defer
+ // function call are stored.
+ argVals []*ssa.Value
+ // The nodes representing the argtmps where the args of the defer are stored
+ argNodes []*Node
+}
+
+type state struct {
+ // configuration (arch) information
+ config *ssa.Config
+
+ // function we're building
+ f *ssa.Func
+
+ // Node for function
+ curfn *Node
+
+ // labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
+ labels map[string]*ssaLabel
+ labeledNodes map[*Node]*ssaLabel
+
+ // unlabeled break and continue statement tracking
+ breakTo *ssa.Block // current target for plain break statement
+ continueTo *ssa.Block // current target for plain continue statement
+
+ // current location where we're interpreting the AST
+ curBlock *ssa.Block
+
+ // variable assignments in the current block (map from variable symbol to ssa value)
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ // TODO: keep a single varnum map, then make all of these maps slices instead?
+ vars map[*Node]*ssa.Value
+
+ // fwdVars are variables that are used before they are defined in the current block.
+ // This map exists just to coalesce multiple references into a single FwdRef op.
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ fwdVars map[*Node]*ssa.Value
+
+ // all defined variables at the end of each block. Indexed by block ID.
+ defvars []map[*Node]*ssa.Value
+
+ // addresses of PPARAM and PPARAMOUT variables.
+ decladdrs map[*Node]*ssa.Value
+
+ // starting values. Memory, stack pointer, and globals pointer
+ startmem *ssa.Value
+ sp *ssa.Value
+ sb *ssa.Value
+ // value representing address of where deferBits autotmp is stored
+ deferBitsAddr *ssa.Value
+ deferBitsTemp *Node
+
+ // line number stack. The current line number is top of stack
+ line []src.XPos
+ // the last line number processed; it may have been popped
+ lastPos src.XPos
+
+ // list of panic calls by function name and line number.
+ // Used to deduplicate panic calls.
+ panics map[funcLine]*ssa.Block
+
+ // list of PPARAMOUT (return) variables.
+ returns []*Node
+
+ cgoUnsafeArgs bool
+ hasdefer bool // whether the function contains a defer statement
+ softFloat bool
+ hasOpenDefers bool // whether we are doing open-coded defers
+
+ // If doing open-coded defers, list of info about the defer calls in
+ // scanning order. Hence, at exit we should run these defers in reverse
+ // order of this list
+ openDefers []*openDeferInfo
+ // For open-coded defers, this is the beginning and end blocks of the last
+ // defer exit code that we have generated so far. We use these to share
+ // code between exits if the shareDeferExits option (disabled by default)
+ // is on.
+ lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
+ lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
+ lastDeferCount int // Number of defers encountered at that point
+
+ prevCall *ssa.Value // the previous call; use this to tie results to the call op.
+}
+
+type funcLine struct {
+ f *obj.LSym
+ base *src.PosBase
+ line uint
+}
+
+type ssaLabel struct {
+ target *ssa.Block // block identified by this label
+ breakTarget *ssa.Block // block to break to in control flow node identified by this label
+ continueTarget *ssa.Block // block to continue to in control flow node identified by this label
+}
+
+// label returns the label associated with sym, creating it if necessary.
+func (s *state) label(sym *types.Sym) *ssaLabel {
+ lab := s.labels[sym.Name]
+ if lab == nil {
+ lab = new(ssaLabel)
+ s.labels[sym.Name] = lab
+ }
+ return lab
+}
+
+func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
+func (s *state) Log() bool { return s.f.Log() }
+func (s *state) Fatalf(msg string, args ...interface{}) {
+ s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
+}
+func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
+func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+
+var (
+ // dummy node for the memory variable
+ memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
+
+ // dummy nodes for temporary variables
+ ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
+ lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
+ newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
+ capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
+ typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
+ okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
+ deferBitsVar = Node{Op: ONAME, Sym: &types.Sym{Name: "deferBits"}}
+)
+
+// startBlock sets the current block we're generating code in to b.
+func (s *state) startBlock(b *ssa.Block) {
+ if s.curBlock != nil {
+ s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
+ }
+ s.curBlock = b
+ s.vars = map[*Node]*ssa.Value{}
+ for n := range s.fwdVars {
+ delete(s.fwdVars, n)
+ }
+}
+
+// endBlock marks the end of generating code for the current block.
+// Returns the (former) current block. Returns nil if there is no current
+// block, i.e. if no code flows to the current execution point.
+func (s *state) endBlock() *ssa.Block {
+ b := s.curBlock
+ if b == nil {
+ return nil
+ }
+ for len(s.defvars) <= int(b.ID) {
+ s.defvars = append(s.defvars, nil)
+ }
+ s.defvars[b.ID] = s.vars
+ s.curBlock = nil
+ s.vars = nil
+ if b.LackingPos() {
+ // Empty plain blocks get the line of their successor (handled after all blocks created),
+ // except for increment blocks in For statements (handled in ssa conversion of OFOR),
+ // and for blocks ending in GOTO/BREAK/CONTINUE.
+ b.Pos = src.NoXPos
+ } else {
+ b.Pos = s.lastPos
+ }
+ return b
+}
+
+// pushLine pushes a line number on the line number stack.
+func (s *state) pushLine(line src.XPos) {
+ if !line.IsKnown() {
+		// the frontend may emit a node with a missing line number;
+		// use the parent's line number in this case.
+ line = s.peekPos()
+ if Debug.K != 0 {
+ Warn("buildssa: unknown position (line 0)")
+ }
+ } else {
+ s.lastPos = line
+ }
+
+ s.line = append(s.line, line)
+}
+
+// popLine pops the top of the line number stack.
+func (s *state) popLine() {
+ s.line = s.line[:len(s.line)-1]
+}
+
+// peekPos peeks the top of the line number stack.
+func (s *state) peekPos() src.XPos {
+ return s.line[len(s.line)-1]
+}
+
+// newValue0 adds a new value with no arguments to the current block.
+func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.curBlock.NewValue0(s.peekPos(), op, t)
+}
+
+// newValue0A adds a new value with no arguments and an aux value to the current block.
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
+ return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
+}
+
+// newValue0I adds a new value with no arguments and an auxint value to the current block.
+func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
+ return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
+}
+
+// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
+}
+
+// newValue1A adds a new value with one argument and an aux value to the current block.
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue1Apos adds a new value with one argument and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+ }
+ return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
+}
+
+// newValue1I adds a new value with one argument and an auxint value to the current block.
+func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue2 adds a new value with two arguments to the current block.
+func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
+}
+
+// newValue2A adds a new value with two arguments and an aux value to the current block.
+func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue2Apos adds a new value with two arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+ }
+ return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
+}
+
+// newValue2I adds a new value with two arguments and an auxint value to the current block.
+func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue3 adds a new value with three arguments to the current block.
+func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
+}
+
+// newValue3I adds a new value with three arguments and an auxint value to the current block.
+func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3A adds a new value with three arguments and an aux value to the current block.
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3Apos adds a new value with three arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+ }
+ return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue4 adds a new value with four arguments to the current block.
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
+}
+
+// newValue4I adds a new value with four arguments and an auxint value to the current block.
+func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
+}
+
+// entryNewValue0 adds a new value with no arguments to the entry block.
+func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.f.Entry.NewValue0(src.NoXPos, op, t)
+}
+
+// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
+ return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
+}
+
+// entryNewValue1 adds a new value with one argument to the entry block.
+func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
+}
+
+// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
+func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
+}
+
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
+}
+
+// entryNewValue2 adds a new value with two arguments to the entry block.
+func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
+}
+
+// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
+func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
+}
+
+// const* routines add a new const value to the entry block.
+func (s *state) constSlice(t *types.Type) *ssa.Value {
+ return s.f.ConstSlice(t)
+}
+func (s *state) constInterface(t *types.Type) *ssa.Value {
+ return s.f.ConstInterface(t)
+}
+func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
+func (s *state) constEmptyString(t *types.Type) *ssa.Value {
+ return s.f.ConstEmptyString(t)
+}
+func (s *state) constBool(c bool) *ssa.Value {
+ return s.f.ConstBool(types.Types[TBOOL], c)
+}
+func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
+ return s.f.ConstInt8(t, c)
+}
+func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
+ return s.f.ConstInt16(t, c)
+}
+func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
+ return s.f.ConstInt32(t, c)
+}
+func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstInt64(t, c)
+}
+func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat32(t, c)
+}
+func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat64(t, c)
+}
+func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
+ if s.config.PtrSize == 8 {
+ return s.constInt64(t, c)
+ }
+ if int64(int32(c)) != c {
+ s.Fatalf("integer constant too big %d", c)
+ }
+ return s.constInt32(t, int32(c))
+}
+func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstOffPtrSP(t, c, s.sp)
+}
+
+// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
+// soft-float runtime function instead (when emitting soft-float code).
+func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg); ok {
+ return c
+ }
+ }
+ return s.newValue1(op, t, arg)
+}
+func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg0, arg1); ok {
+ return c
+ }
+ }
+ return s.newValue2(op, t, arg0, arg1)
+}
+
+type instrumentKind uint8
+
+const (
+ instrumentRead = iota
+ instrumentWrite
+ instrumentMove
+)
+
+func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ s.instrument2(t, addr, nil, kind)
+}
+
+// instrumentFields instruments a read/write operation on addr.
+// If it is instrumenting for MSAN and t is a struct type, it instruments
+// the operation for each field, instead of for the whole struct.
+func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ if !flag_msan || !t.IsStruct() {
+ s.instrument(t, addr, kind)
+ return
+ }
+ for _, f := range t.Fields().Slice() {
+ if f.Sym.IsBlank() {
+ continue
+ }
+ offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
+ s.instrumentFields(f.Type, offptr, kind)
+ }
+}
+
+func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
+ if flag_msan {
+ s.instrument2(t, dst, src, instrumentMove)
+ } else {
+ s.instrument(t, src, instrumentRead)
+ s.instrument(t, dst, instrumentWrite)
+ }
+}
+
+func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
+ if !s.curfn.Func.InstrumentBody() {
+ return
+ }
+
+ w := t.Size()
+ if w == 0 {
+ return // can't race on zero-sized things
+ }
+
+ if ssa.IsSanitizerSafeAddr(addr) {
+ return
+ }
+
+ var fn *obj.LSym
+ needWidth := false
+
+ if addr2 != nil && kind != instrumentMove {
+ panic("instrument2: non-nil addr2 for non-move instrumentation")
+ }
+
+ if flag_msan {
+ switch kind {
+ case instrumentRead:
+ fn = msanread
+ case instrumentWrite:
+ fn = msanwrite
+ case instrumentMove:
+ fn = msanmove
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
+ // for composite objects we have to write every address
+ // because a write might happen to any subobject.
+ // composites with only one element don't have subobjects, though.
+ switch kind {
+ case instrumentRead:
+ fn = racereadrange
+ case instrumentWrite:
+ fn = racewriterange
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if flag_race {
+ // for non-composite objects we can write just the start
+ // address, as any write must write the first byte.
+ switch kind {
+ case instrumentRead:
+ fn = raceread
+ case instrumentWrite:
+ fn = racewrite
+ default:
+ panic("unreachable")
+ }
+ } else {
+ panic("unreachable")
+ }
+
+ args := []*ssa.Value{addr}
+ if addr2 != nil {
+ args = append(args, addr2)
+ }
+ if needWidth {
+ args = append(args, s.constInt(types.Types[TUINTPTR], w))
+ }
+ s.rtcall(fn, true, nil, args...)
+}
+
+func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
+ s.instrumentFields(t, src, instrumentRead)
+ return s.rawLoad(t, src)
+}
+
+func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpLoad, t, src, s.mem())
+}
+
+func (s *state) store(t *types.Type, dst, val *ssa.Value) {
+ s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+}
+
+func (s *state) zero(t *types.Type, dst *ssa.Value) {
+ s.instrument(t, dst, instrumentWrite)
+ store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
+ store.Aux = t
+ s.vars[&memVar] = store
+}
+
+func (s *state) move(t *types.Type, dst, src *ssa.Value) {
+ s.instrumentMove(t, dst, src)
+ store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
+ store.Aux = t
+ s.vars[&memVar] = store
+}
+
+// stmtList converts the statement list n to SSA and adds it to s.
+func (s *state) stmtList(l Nodes) {
+ for _, n := range l.Slice() {
+ s.stmt(n)
+ }
+}
+
+// stmt converts the statement n to SSA and adds it to s.
+func (s *state) stmt(n *Node) {
+ if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
+		// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers; using them would only cause confusion when debugging.
+ s.pushLine(n.Pos)
+ defer s.popLine()
+ }
+
+ // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
+ // then this code is dead. Stop here.
+ if s.curBlock == nil && n.Op != OLABEL {
+ return
+ }
+
+ s.stmtList(n.Ninit)
+ switch n.Op {
+
+ case OBLOCK:
+ s.stmtList(n.List)
+
+ // No-ops
+ case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
+
+ // Expression statements
+ case OCALLFUNC:
+ if isIntrinsicCall(n) {
+ s.intrinsicCall(n)
+ return
+ }
+ fallthrough
+
+ case OCALLMETH, OCALLINTER:
+ s.callResult(n, callNormal)
+ if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
+ if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
+ n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ // TODO: never rewrite OPANIC to OCALLFUNC in the
+ // first place. Need to wait until all backends
+ // go through SSA.
+ }
+ }
+ case ODEFER:
+ if Debug_defer > 0 {
+ var defertype string
+ if s.hasOpenDefers {
+ defertype = "open-coded"
+ } else if n.Esc == EscNever {
+ defertype = "stack-allocated"
+ } else {
+ defertype = "heap-allocated"
+ }
+ Warnl(n.Pos, "%s defer", defertype)
+ }
+ if s.hasOpenDefers {
+ s.openDeferRecord(n.Left)
+ } else {
+ d := callDefer
+ if n.Esc == EscNever {
+ d = callDeferStack
+ }
+ s.callResult(n.Left, d)
+ }
+ case OGO:
+ s.callResult(n.Left, callGo)
+
+ case OAS2DOTTYPE:
+ res, resok := s.dottype(n.Right, true)
+ deref := false
+ if !canSSAType(n.Right.Type) {
+ if res.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ mem := s.mem()
+ if mem.Op == ssa.OpVarKill {
+ mem = mem.Args[0]
+ }
+ if res.Args[1] != mem {
+ s.Fatalf("memory no longer live from 2-result dottype load")
+ }
+ deref = true
+ res = res.Args[0]
+ }
+ s.assign(n.List.First(), res, deref, 0)
+ s.assign(n.List.Second(), resok, false, 0)
+ return
+
+ case OAS2FUNC:
+ // We come here only when it is an intrinsic call returning two values.
+ if !isIntrinsicCall(n.Right) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
+ }
+ v := s.intrinsicCall(n.Right)
+ v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
+ v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
+ s.assign(n.List.First(), v1, false, 0)
+ s.assign(n.List.Second(), v2, false, 0)
+ return
+
+ case ODCL:
+ if n.Left.Class() == PAUTOHEAP {
+ s.Fatalf("DCL %v", n)
+ }
+
+ case OLABEL:
+ sym := n.Sym
+ lab := s.label(sym)
+
+ // Associate label with its control flow node, if any
+ if ctl := n.labeledControl(); ctl != nil {
+ s.labeledNodes[ctl] = lab
+ }
+
+ // The label might already have a target block via a goto.
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ // Go to that label.
+ // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
+ if s.curBlock != nil {
+ b := s.endBlock()
+ b.AddEdgeTo(lab.target)
+ }
+ s.startBlock(lab.target)
+
+ case OGOTO:
+ sym := n.Sym
+
+ lab := s.label(sym)
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(lab.target)
+
+ case OAS:
+ if n.Left == n.Right && n.Left.Op == ONAME {
+ // An x=x assignment. No point in doing anything
+ // here. In addition, skipping this assignment
+ // prevents generating:
+ // VARDEF x
+ // COPY x -> x
+ // which is bad because x is incorrectly considered
+ // dead before the vardef. See issue #14904.
+ return
+ }
+
+ // Evaluate RHS.
+ rhs := n.Right
+ if rhs != nil {
+ switch rhs.Op {
+ case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !isZero(rhs) {
+ s.Fatalf("literal with nonzero value in SSA: %v", rhs)
+ }
+ rhs = nil
+ case OAPPEND:
+ // Check whether we're writing the result of an append back to the same slice.
+ // If so, we handle it specially to avoid write barriers on the fast
+ // (non-growth) path.
+ if !samesafeexpr(n.Left, rhs.List.First()) || Debug.N != 0 {
+ break
+ }
+ // If the slice can be SSA'd, it'll be on the stack,
+ // so there will be no write barriers,
+ // so there's no need to attempt to prevent them.
+ if s.canSSA(n.Left) {
+ if Debug_append > 0 { // replicating old diagnostic message
+ Warnl(n.Pos, "append: len-only update (in local slice)")
+ }
+ break
+ }
+ if Debug_append > 0 {
+ Warnl(n.Pos, "append: len-only update")
+ }
+ s.append(rhs, true)
+ return
+ }
+ }
+
+ if n.Left.isBlank() {
+ // _ = rhs
+ // Just evaluate rhs for side-effects.
+ if rhs != nil {
+ s.expr(rhs)
+ }
+ return
+ }
+
+ var t *types.Type
+ if n.Right != nil {
+ t = n.Right.Type
+ } else {
+ t = n.Left.Type
+ }
+
+ var r *ssa.Value
+ deref := !canSSAType(t)
+ if deref {
+ if rhs == nil {
+ r = nil // Signal assign to use OpZero.
+ } else {
+ r = s.addr(rhs)
+ }
+ } else {
+ if rhs == nil {
+ r = s.zeroVal(t)
+ } else {
+ r = s.expr(rhs)
+ }
+ }
+
+ var skip skipMask
+ if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
+ // We're assigning a slicing operation back to its source.
+ // Don't write back fields we aren't changing. See issue #14855.
+ i, j, k := rhs.SliceBounds()
+ if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64Val() == 0) {
+ // [0:...] is the same as [:...]
+ i = nil
+ }
+ // TODO: detect defaults for len/cap also.
+ // Currently doesn't really work because (*p)[:len(*p)] appears here as:
+ // tmp = len(*p)
+ // (*p)[:tmp]
+ //if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
+ // j = nil
+ //}
+ //if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
+ // k = nil
+ //}
+ if i == nil {
+ skip |= skipPtr
+ if j == nil {
+ skip |= skipLen
+ }
+ if k == nil {
+ skip |= skipCap
+ }
+ }
+ }
+
+ s.assign(n.Left, r, deref, skip)
+
+ case OIF:
+ if Isconst(n.Left, CTBOOL) {
+ s.stmtList(n.Left.Ninit)
+ if n.Left.BoolVal() {
+ s.stmtList(n.Nbody)
+ } else {
+ s.stmtList(n.Rlist)
+ }
+ break
+ }
+
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ var likely int8
+ if n.Likely() {
+ likely = 1
+ }
+ var bThen *ssa.Block
+ if n.Nbody.Len() != 0 {
+ bThen = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bThen = bEnd
+ }
+ var bElse *ssa.Block
+ if n.Rlist.Len() != 0 {
+ bElse = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bElse = bEnd
+ }
+ s.condBranch(n.Left, bThen, bElse, likely)
+
+ if n.Nbody.Len() != 0 {
+ s.startBlock(bThen)
+ s.stmtList(n.Nbody)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ if n.Rlist.Len() != 0 {
+ s.startBlock(bElse)
+ s.stmtList(n.Rlist)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ s.startBlock(bEnd)
+
+ case ORETURN:
+ s.stmtList(n.List)
+ b := s.exit()
+ b.Pos = s.lastPos.WithIsStmt()
+
+ case ORETJMP:
+ s.stmtList(n.List)
+ b := s.exit()
+ b.Kind = ssa.BlockRetJmp // override BlockRet
+ b.Aux = n.Sym.Linksym()
+
+ case OCONTINUE, OBREAK:
+ var to *ssa.Block
+ if n.Sym == nil {
+ // plain break/continue
+ switch n.Op {
+ case OCONTINUE:
+ to = s.continueTo
+ case OBREAK:
+ to = s.breakTo
+ }
+ } else {
+ // labeled break/continue; look up the target
+ sym := n.Sym
+ lab := s.label(sym)
+ switch n.Op {
+ case OCONTINUE:
+ to = lab.continueTarget
+ case OBREAK:
+ to = lab.breakTarget
+ }
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(to)
+
+ case OFOR, OFORUNTIL:
+ // OFOR: for Ninit; Left; Right { Nbody }
+ // cond (Left); body (Nbody); incr (Right)
+ //
+ // OFORUNTIL: for Ninit; Left; Right; List { Nbody }
+ // => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bBody := s.f.NewBlock(ssa.BlockPlain)
+ bIncr := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ // ensure empty for loops have correct position; issue #30167
+ bBody.Pos = n.Pos
+
+ // first, jump to condition test (OFOR) or body (OFORUNTIL)
+ b := s.endBlock()
+ if n.Op == OFOR {
+ b.AddEdgeTo(bCond)
+ // generate code to test condition
+ s.startBlock(bCond)
+ if n.Left != nil {
+ s.condBranch(n.Left, bBody, bEnd, 1)
+ } else {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ b.AddEdgeTo(bBody)
+ }
+
+ } else {
+ b.AddEdgeTo(bBody)
+ }
+
+ // set up for continue/break in body
+ prevContinue := s.continueTo
+ prevBreak := s.breakTo
+ s.continueTo = bIncr
+ s.breakTo = bEnd
+ lab := s.labeledNodes[n]
+ if lab != nil {
+ // labeled for loop
+ lab.continueTarget = bIncr
+ lab.breakTarget = bEnd
+ }
+
+ // generate body
+ s.startBlock(bBody)
+ s.stmtList(n.Nbody)
+
+ // tear down continue/break
+ s.continueTo = prevContinue
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.continueTarget = nil
+ lab.breakTarget = nil
+ }
+
+ // done with body, goto incr
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bIncr)
+ }
+
+ // generate incr (and, for OFORUNTIL, condition)
+ s.startBlock(bIncr)
+ if n.Right != nil {
+ s.stmt(n.Right)
+ }
+ if n.Op == OFOR {
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bCond)
+ // It can happen that bIncr ends in a block containing only VARKILL,
+ // and that muddles the debugging experience.
+ if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
+ b.Pos = bCond.Pos
+ }
+ }
+ } else {
+ // bCond is unused in OFORUNTIL, so repurpose it.
+ bLateIncr := bCond
+ // test condition
+ s.condBranch(n.Left, bLateIncr, bEnd, 1)
+ // generate late increment
+ s.startBlock(bLateIncr)
+ s.stmtList(n.List)
+ s.endBlock().AddEdgeTo(bBody)
+ }
+
+ s.startBlock(bEnd)
+
+ case OSWITCH, OSELECT:
+ // These have been mostly rewritten by the front end into their Nbody fields.
+ // Our main task is to correctly hook up any break statements.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ prevBreak := s.breakTo
+ s.breakTo = bEnd
+ lab := s.labeledNodes[n]
+ if lab != nil {
+ // labeled
+ lab.breakTarget = bEnd
+ }
+
+ // generate body code
+ s.stmtList(n.Nbody)
+
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.breakTarget = nil
+ }
+
+ // walk adds explicit OBREAK nodes to the end of all reachable code paths.
+ // If we still have a current block here, then mark it unreachable.
+ if s.curBlock != nil {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ }
+ s.startBlock(bEnd)
+
+ case OVARDEF:
+ if !s.canSSA(n.Left) {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ }
+ case OVARKILL:
+ // Insert a varkill op to record that a variable is no longer live.
+ // We only care about liveness info at call sites, so putting the
+ // varkill in the store chain is enough to keep it correctly ordered
+ // with respect to call ops.
+ if !s.canSSA(n.Left) {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
+ }
+
+ case OVARLIVE:
+ // Insert a varlive op to record that a variable is still live.
+ if !n.Left.Name.Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
+ }
+ switch n.Left.Class() {
+ case PAUTO, PPARAM, PPARAMOUT:
+ default:
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ }
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
+
+ case OCHECKNIL:
+ p := s.expr(n.Left)
+ s.nilCheck(p)
+
+ case OINLMARK:
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
+
+ default:
+ s.Fatalf("unhandled stmt %v", n.Op)
+ }
+}
+
+// If true, share as many open-coded defer exits as possible (with the downside of
+// worse line-number information)
+const shareDeferExits = false
+
+// exit processes any code that needs to be generated just before returning.
+// It returns a BlockRet block that ends the control flow. Its control value
+// will be set to the final memory state.
+func (s *state) exit() *ssa.Block {
+ if s.hasdefer {
+ if s.hasOpenDefers {
+ if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
+ if s.curBlock.Kind != ssa.BlockPlain {
+ panic("Block for an exit should be BlockPlain")
+ }
+ s.curBlock.AddEdgeTo(s.lastDeferExit)
+ s.endBlock()
+ return s.lastDeferFinalBlock
+ }
+ s.openDeferExit()
+ } else {
+ s.rtcall(Deferreturn, true, nil)
+ }
+ }
+
+ // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
+ // variables back to the stack.
+ s.stmtList(s.curfn.Func.Exit)
+
+ // Store SSAable PPARAMOUT variables back to stack locations.
+ for _, n := range s.returns {
+ addr := s.decladdrs[n]
+ val := s.variable(n, n.Type)
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.store(n.Type, addr, val)
+ // TODO: if val is ever spilled, we'd like to use the
+ // PPARAMOUT slot for spilling it. That won't happen
+ // currently.
+ }
+
+ // Do actual return.
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockRet
+ b.SetControl(m)
+ if s.hasdefer && s.hasOpenDefers {
+ s.lastDeferFinalBlock = b
+ }
+ return b
+}
+
+type opAndType struct {
+ op Op
+ etype types.EType
+}
+
+var opToSSA = map[opAndType]ssa.Op{
+ opAndType{OADD, TINT8}: ssa.OpAdd8,
+ opAndType{OADD, TUINT8}: ssa.OpAdd8,
+ opAndType{OADD, TINT16}: ssa.OpAdd16,
+ opAndType{OADD, TUINT16}: ssa.OpAdd16,
+ opAndType{OADD, TINT32}: ssa.OpAdd32,
+ opAndType{OADD, TUINT32}: ssa.OpAdd32,
+ opAndType{OADD, TINT64}: ssa.OpAdd64,
+ opAndType{OADD, TUINT64}: ssa.OpAdd64,
+ opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
+ opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
+
+ opAndType{OSUB, TINT8}: ssa.OpSub8,
+ opAndType{OSUB, TUINT8}: ssa.OpSub8,
+ opAndType{OSUB, TINT16}: ssa.OpSub16,
+ opAndType{OSUB, TUINT16}: ssa.OpSub16,
+ opAndType{OSUB, TINT32}: ssa.OpSub32,
+ opAndType{OSUB, TUINT32}: ssa.OpSub32,
+ opAndType{OSUB, TINT64}: ssa.OpSub64,
+ opAndType{OSUB, TUINT64}: ssa.OpSub64,
+ opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
+ opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
+
+ opAndType{ONOT, TBOOL}: ssa.OpNot,
+
+ opAndType{ONEG, TINT8}: ssa.OpNeg8,
+ opAndType{ONEG, TUINT8}: ssa.OpNeg8,
+ opAndType{ONEG, TINT16}: ssa.OpNeg16,
+ opAndType{ONEG, TUINT16}: ssa.OpNeg16,
+ opAndType{ONEG, TINT32}: ssa.OpNeg32,
+ opAndType{ONEG, TUINT32}: ssa.OpNeg32,
+ opAndType{ONEG, TINT64}: ssa.OpNeg64,
+ opAndType{ONEG, TUINT64}: ssa.OpNeg64,
+ opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
+ opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
+
+ opAndType{OBITNOT, TINT8}: ssa.OpCom8,
+ opAndType{OBITNOT, TUINT8}: ssa.OpCom8,
+ opAndType{OBITNOT, TINT16}: ssa.OpCom16,
+ opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
+ opAndType{OBITNOT, TINT32}: ssa.OpCom32,
+ opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
+ opAndType{OBITNOT, TINT64}: ssa.OpCom64,
+ opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
+
+ opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
+ opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
+ opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
+ opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
+
+ opAndType{OMUL, TINT8}: ssa.OpMul8,
+ opAndType{OMUL, TUINT8}: ssa.OpMul8,
+ opAndType{OMUL, TINT16}: ssa.OpMul16,
+ opAndType{OMUL, TUINT16}: ssa.OpMul16,
+ opAndType{OMUL, TINT32}: ssa.OpMul32,
+ opAndType{OMUL, TUINT32}: ssa.OpMul32,
+ opAndType{OMUL, TINT64}: ssa.OpMul64,
+ opAndType{OMUL, TUINT64}: ssa.OpMul64,
+ opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
+ opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
+
+ opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
+ opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
+
+ opAndType{ODIV, TINT8}: ssa.OpDiv8,
+ opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
+ opAndType{ODIV, TINT16}: ssa.OpDiv16,
+ opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
+ opAndType{ODIV, TINT32}: ssa.OpDiv32,
+ opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
+ opAndType{ODIV, TINT64}: ssa.OpDiv64,
+ opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
+
+ opAndType{OMOD, TINT8}: ssa.OpMod8,
+ opAndType{OMOD, TUINT8}: ssa.OpMod8u,
+ opAndType{OMOD, TINT16}: ssa.OpMod16,
+ opAndType{OMOD, TUINT16}: ssa.OpMod16u,
+ opAndType{OMOD, TINT32}: ssa.OpMod32,
+ opAndType{OMOD, TUINT32}: ssa.OpMod32u,
+ opAndType{OMOD, TINT64}: ssa.OpMod64,
+ opAndType{OMOD, TUINT64}: ssa.OpMod64u,
+
+ opAndType{OAND, TINT8}: ssa.OpAnd8,
+ opAndType{OAND, TUINT8}: ssa.OpAnd8,
+ opAndType{OAND, TINT16}: ssa.OpAnd16,
+ opAndType{OAND, TUINT16}: ssa.OpAnd16,
+ opAndType{OAND, TINT32}: ssa.OpAnd32,
+ opAndType{OAND, TUINT32}: ssa.OpAnd32,
+ opAndType{OAND, TINT64}: ssa.OpAnd64,
+ opAndType{OAND, TUINT64}: ssa.OpAnd64,
+
+ opAndType{OOR, TINT8}: ssa.OpOr8,
+ opAndType{OOR, TUINT8}: ssa.OpOr8,
+ opAndType{OOR, TINT16}: ssa.OpOr16,
+ opAndType{OOR, TUINT16}: ssa.OpOr16,
+ opAndType{OOR, TINT32}: ssa.OpOr32,
+ opAndType{OOR, TUINT32}: ssa.OpOr32,
+ opAndType{OOR, TINT64}: ssa.OpOr64,
+ opAndType{OOR, TUINT64}: ssa.OpOr64,
+
+ opAndType{OXOR, TINT8}: ssa.OpXor8,
+ opAndType{OXOR, TUINT8}: ssa.OpXor8,
+ opAndType{OXOR, TINT16}: ssa.OpXor16,
+ opAndType{OXOR, TUINT16}: ssa.OpXor16,
+ opAndType{OXOR, TINT32}: ssa.OpXor32,
+ opAndType{OXOR, TUINT32}: ssa.OpXor32,
+ opAndType{OXOR, TINT64}: ssa.OpXor64,
+ opAndType{OXOR, TUINT64}: ssa.OpXor64,
+
+ opAndType{OEQ, TBOOL}: ssa.OpEqB,
+ opAndType{OEQ, TINT8}: ssa.OpEq8,
+ opAndType{OEQ, TUINT8}: ssa.OpEq8,
+ opAndType{OEQ, TINT16}: ssa.OpEq16,
+ opAndType{OEQ, TUINT16}: ssa.OpEq16,
+ opAndType{OEQ, TINT32}: ssa.OpEq32,
+ opAndType{OEQ, TUINT32}: ssa.OpEq32,
+ opAndType{OEQ, TINT64}: ssa.OpEq64,
+ opAndType{OEQ, TUINT64}: ssa.OpEq64,
+ opAndType{OEQ, TINTER}: ssa.OpEqInter,
+ opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
+ opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
+ opAndType{OEQ, TMAP}: ssa.OpEqPtr,
+ opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
+ opAndType{OEQ, TPTR}: ssa.OpEqPtr,
+ opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
+ opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
+ opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
+ opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
+
+ opAndType{ONE, TBOOL}: ssa.OpNeqB,
+ opAndType{ONE, TINT8}: ssa.OpNeq8,
+ opAndType{ONE, TUINT8}: ssa.OpNeq8,
+ opAndType{ONE, TINT16}: ssa.OpNeq16,
+ opAndType{ONE, TUINT16}: ssa.OpNeq16,
+ opAndType{ONE, TINT32}: ssa.OpNeq32,
+ opAndType{ONE, TUINT32}: ssa.OpNeq32,
+ opAndType{ONE, TINT64}: ssa.OpNeq64,
+ opAndType{ONE, TUINT64}: ssa.OpNeq64,
+ opAndType{ONE, TINTER}: ssa.OpNeqInter,
+ opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
+ opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
+ opAndType{ONE, TMAP}: ssa.OpNeqPtr,
+ opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
+ opAndType{ONE, TPTR}: ssa.OpNeqPtr,
+ opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
+ opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
+ opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
+ opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
+
+ opAndType{OLT, TINT8}: ssa.OpLess8,
+ opAndType{OLT, TUINT8}: ssa.OpLess8U,
+ opAndType{OLT, TINT16}: ssa.OpLess16,
+ opAndType{OLT, TUINT16}: ssa.OpLess16U,
+ opAndType{OLT, TINT32}: ssa.OpLess32,
+ opAndType{OLT, TUINT32}: ssa.OpLess32U,
+ opAndType{OLT, TINT64}: ssa.OpLess64,
+ opAndType{OLT, TUINT64}: ssa.OpLess64U,
+ opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
+ opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
+
+ opAndType{OLE, TINT8}: ssa.OpLeq8,
+ opAndType{OLE, TUINT8}: ssa.OpLeq8U,
+ opAndType{OLE, TINT16}: ssa.OpLeq16,
+ opAndType{OLE, TUINT16}: ssa.OpLeq16U,
+ opAndType{OLE, TINT32}: ssa.OpLeq32,
+ opAndType{OLE, TUINT32}: ssa.OpLeq32U,
+ opAndType{OLE, TINT64}: ssa.OpLeq64,
+ opAndType{OLE, TUINT64}: ssa.OpLeq64U,
+ opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
+ opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
+}
+
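+// concreteEtype returns t's etype with the platform-dependent types TINT, TUINT,
+// and TUINTPTR replaced by their fixed-width equivalents for the target's pointer size.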
+func (s *state) concreteEtype(t *types.Type) types.EType {
+ e := t.Etype
+ switch e {
+ default:
+ return e
+ case TINT:
+ if s.config.PtrSize == 8 {
+ return TINT64
+ }
+ return TINT32
+ case TUINT:
+ if s.config.PtrSize == 8 {
+ return TUINT64
+ }
+ return TUINT32
+ case TUINTPTR:
+ if s.config.PtrSize == 8 {
+ return TUINT64
+ }
+ return TUINT32
+ }
+}
+
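+// ssaOp returns the SSA opcode that implements the given gc operation on values of type t.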
+func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
+ etype := s.concreteEtype(t)
+ x, ok := opToSSA[opAndType{op, etype}]
+ if !ok {
+ s.Fatalf("unhandled binary op %v %s", op, etype)
+ }
+ return x
+}
+
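+// floatForComplex returns the float type of the real and imaginary parts of complex type t.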
+func floatForComplex(t *types.Type) *types.Type {
+ switch t.Etype {
+ case TCOMPLEX64:
+ return types.Types[TFLOAT32]
+ case TCOMPLEX128:
+ return types.Types[TFLOAT64]
+ }
+ Fatalf("unexpected type: %v", t)
+ return nil
+}
+
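+// complexForFloat returns the complex type whose real and imaginary parts have float type t.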
+func complexForFloat(t *types.Type) *types.Type {
+ switch t.Etype {
+ case TFLOAT32:
+ return types.Types[TCOMPLEX64]
+ case TFLOAT64:
+ return types.Types[TCOMPLEX128]
+ }
+ Fatalf("unexpected type: %v", t)
+ return nil
+}
+
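+// opAndTwoTypes keys shiftOpToSSA: a shift operation plus the etypes of the
+// shifted value and the shift count.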
+type opAndTwoTypes struct {
+ op Op
+ etype1 types.EType
+ etype2 types.EType
+}
+
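+// twoTypes keys the float-conversion tables: the source and destination etypes of a conversion.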
+type twoTypes struct {
+ etype1 types.EType
+ etype2 types.EType
+}
+
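+// twoOpsAndType describes a conversion performed as op1 (producing a value of
+// intermediateType) followed by op2.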
+type twoOpsAndType struct {
+ op1 ssa.Op
+ op2 ssa.Op
+ intermediateType types.EType
+}
+
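+// fpConvOpToSSA maps a (source, destination) type pair to the op sequence
+// implementing the floating-point conversion.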
+var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+
+ twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
+ twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
+ twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
+ twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
+
+ twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
+ twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
+ twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
+ twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
+
+ twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
+ twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
+ twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
+ twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
+
+ twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
+ twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
+ twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
+ twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
+ // unsigned
+ twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
+ twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
+ twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
+ twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
+
+ twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
+ twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
+ twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
+ twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
+
+ twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
+ twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
+ twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
+ twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+
+ twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
+ twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
+ twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
+ twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+
+ // float
+ twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
+ twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
+ twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
+ twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
+}
+
+// fpConvOpToSSA32 is used only on 32-bit archs and holds only the entries that
+// differ from fpConvOpToSSA: on 32-bit archs, don't use int64<->float conversions for uint32.
+var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
+ twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
+ twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
+ twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
+ twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
+}
+
+// uint64<->float conversions, only on machines that have instructions for that
+var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+ twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
+ twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
+ twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
+ twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
+}
+
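+// shiftOpToSSA maps a shift operation and the etypes of its operands to the corresponding SSA op.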
+var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
+ opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
+ opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
+
+ opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
+ opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
+
+ opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
+ opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
+
+ opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
+ opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
+
+ opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
+ opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
+ opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
+ opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
+ opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
+ opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
+ opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
+ opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
+
+ opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
+ opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
+ opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
+ opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
+ opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
+ opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
+ opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
+ opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
+
+ opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
+ opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
+ opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
+ opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
+ opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
+ opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
+ opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
+ opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
+
+ opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
+ opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
+ opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
+ opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
+ opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
+ opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
+ opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
+ opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
+}
+
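+// ssaShiftOp returns the SSA opcode for shift operation op applied to a value
+// of type t with a shift count of type u.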
+func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
+ etype1 := s.concreteEtype(t)
+ etype2 := s.concreteEtype(u)
+ x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
+ if !ok {
+ s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
+ }
+ return x
+}
+
+// expr converts the expression n to ssa, adds it to s and returns the ssa result.
+func (s *state) expr(n *Node) *ssa.Value {
+ if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
+ // ONAMEs and named OLITERALs have the line number
+ // of the decl, not the use. See issue 14742.
+ s.pushLine(n.Pos)
+ defer s.popLine()
+ }
+
+ s.stmtList(n.Ninit)
+ switch n.Op {
+ case OBYTES2STRTMP:
+ slice := s.expr(n.Left)
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
+ case OSTR2BYTESTMP:
+ str := s.expr(n.Left)
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
+ case OCFUNC:
+ aux := n.Left.Sym.Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
+ case ONAME:
+ if n.Class() == PFUNC {
+ // "value" of a function is the address of the function's closure
+ sym := funcsym(n.Sym).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ }
+ if s.canSSA(n) {
+ return s.variable(n, n.Type)
+ }
+ addr := s.addr(n)
+ return s.load(n.Type, addr)
+ case OCLOSUREVAR:
+ addr := s.addr(n)
+ return s.load(n.Type, addr)
+ case OLITERAL:
+ switch u := n.Val().U.(type) {
+ case *Mpint:
+ i := u.Int64()
+ switch n.Type.Size() {
+ case 1:
+ return s.constInt8(n.Type, int8(i))
+ case 2:
+ return s.constInt16(n.Type, int16(i))
+ case 4:
+ return s.constInt32(n.Type, int32(i))
+ case 8:
+ return s.constInt64(n.Type, i)
+ default:
+ s.Fatalf("bad integer size %d", n.Type.Size())
+ return nil
+ }
+ case string:
+ if u == "" {
+ return s.constEmptyString(n.Type)
+ }
+ return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
+ case bool:
+ return s.constBool(u)
+ case *NilVal:
+ t := n.Type
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case *Mpflt:
+ switch n.Type.Size() {
+ case 4:
+ return s.constFloat32(n.Type, u.Float32())
+ case 8:
+ return s.constFloat64(n.Type, u.Float64())
+ default:
+ s.Fatalf("bad float size %d", n.Type.Size())
+ return nil
+ }
+ case *Mpcplx:
+ r := &u.Real
+ i := &u.Imag
+ switch n.Type.Size() {
+ case 8:
+ pt := types.Types[TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type,
+ s.constFloat32(pt, r.Float32()),
+ s.constFloat32(pt, i.Float32()))
+ case 16:
+ pt := types.Types[TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type,
+ s.constFloat64(pt, r.Float64()),
+ s.constFloat64(pt, i.Float64()))
+ default:
+ s.Fatalf("bad float size %d", n.Type.Size())
+ return nil
+ }
+
+ default:
+ s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
+ return nil
+ }
+ case OCONVNOP:
+ to := n.Type
+ from := n.Left.Type
+
+ // Assume everything will work out, so set up our return value.
+ // Anything interesting that happens from here on is a fatal error.
+ x := s.expr(n.Left)
+
+ // Special case for not confusing GC and liveness.
+ // We don't want pointers accidentally classified
+ // as not-pointers or vice-versa because of copy
+ // elision.
+ if to.IsPtrShaped() != from.IsPtrShaped() {
+ return s.newValue2(ssa.OpConvert, to, x, s.mem())
+ }
+
+ v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
+
+ // CONVNOP closure
+ if to.Etype == TFUNC && from.IsPtrShaped() {
+ return v
+ }
+
+ // named <--> unnamed type or typed <--> untyped const
+ if from.Etype == to.Etype {
+ return v
+ }
+
+ // unsafe.Pointer <--> *T
+ if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
+ return v
+ }
+
+ // map <--> *hmap
+ if to.Etype == TMAP && from.IsPtr() &&
+ to.MapType().Hmap == from.Elem() {
+ return v
+ }
+
+ dowidth(from)
+ dowidth(to)
+ if from.Width != to.Width {
+ s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
+ return nil
+ }
+ if etypesign(from.Etype) != etypesign(to.Etype) {
+ s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
+ return nil
+ }
+
+ if instrumenting {
+ // These appear to be fine, but they fail the
+ // integer constraint below, so okay them here.
+ // Sample non-integer conversion: map[string]string -> *uint8
+ return v
+ }
+
+ if etypesign(from.Etype) == 0 {
+ s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
+ return nil
+ }
+
+ // integer, same width, same sign
+ return v
+
+ case OCONV:
+ x := s.expr(n.Left)
+ ft := n.Left.Type // from type
+ tt := n.Type // to type
+ if ft.IsBoolean() && tt.IsKind(TUINT8) {
+ // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
+ return s.newValue1(ssa.OpCopy, n.Type, x)
+ }
+ if ft.IsInteger() && tt.IsInteger() {
+ var op ssa.Op
+ if tt.Size() == ft.Size() {
+ op = ssa.OpCopy
+ } else if tt.Size() < ft.Size() {
+ // truncation
+ switch 10*ft.Size() + tt.Size() {
+ case 21:
+ op = ssa.OpTrunc16to8
+ case 41:
+ op = ssa.OpTrunc32to8
+ case 42:
+ op = ssa.OpTrunc32to16
+ case 81:
+ op = ssa.OpTrunc64to8
+ case 82:
+ op = ssa.OpTrunc64to16
+ case 84:
+ op = ssa.OpTrunc64to32
+ default:
+ s.Fatalf("weird integer truncation %v -> %v", ft, tt)
+ }
+ } else if ft.IsSigned() {
+ // sign extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpSignExt8to16
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
+ }
+ } else {
+ // zero extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpZeroExt8to16
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
+ }
+ }
+ return s.newValue1(op, n.Type, x)
+ }
+
+ if ft.IsFloat() || tt.IsFloat() {
+ conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
+ if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+ if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
+ if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+
+ if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
+ if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint32Tofloat32(n, x, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint32Tofloat64(n, x, ft, tt)
+ }
+ } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint32(n, x, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint32(n, x, ft, tt)
+ }
+ }
+ }
+
+ if !ok {
+ s.Fatalf("weird float conversion %v -> %v", ft, tt)
+ }
+ op1, op2, it := conv.op1, conv.op2, conv.intermediateType
+
+ if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
+ // normal case, not tripping over unsigned 64
+ if op1 == ssa.OpCopy {
+ if op2 == ssa.OpCopy {
+ return x
+ }
+ return s.newValueOrSfCall1(op2, n.Type, x)
+ }
+ if op2 == ssa.OpCopy {
+ return s.newValueOrSfCall1(op1, n.Type, x)
+ }
+ return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
+ }
+ // Tricky 64-bit unsigned cases.
+ if ft.IsInteger() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint64Tofloat32(n, x, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint64Tofloat64(n, x, ft, tt)
+ }
+ s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
+ }
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint64(n, x, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint64(n, x, ft, tt)
+ }
+ s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
+ return nil
+ }
+
+ if ft.IsComplex() && tt.IsComplex() {
+ var op ssa.Op
+ if ft.Size() == tt.Size() {
+ switch ft.Size() {
+ case 8:
+ op = ssa.OpRound32F
+ case 16:
+ op = ssa.OpRound64F
+ default:
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ } else if ft.Size() == 8 && tt.Size() == 16 {
+ op = ssa.OpCvt32Fto64F
+ } else if ft.Size() == 16 && tt.Size() == 8 {
+ op = ssa.OpCvt64Fto32F
+ } else {
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ ftp := floatForComplex(ft)
+ ttp := floatForComplex(tt)
+ return s.newValue2(ssa.OpComplexMake, tt,
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
+ }
+
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
+ return nil
+
+ case ODOTTYPE:
+ res, _ := s.dottype(n, false)
+ return res
+
+ // binary ops
+ case OLT, OEQ, ONE, OLE, OGE, OGT:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ if n.Left.Type.IsComplex() {
+ pt := floatForComplex(n.Left.Type)
+ op := s.ssaOp(OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
+ switch n.Op {
+ case OEQ:
+ return c
+ case ONE:
+ return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
+ default:
+ s.Fatalf("ordered complex compare %v", n.Op)
+ }
+ }
+
+ // Convert OGE and OGT into OLE and OLT.
+ op := n.Op
+ switch op {
+ case OGE:
+ op, a, b = OLE, b, a
+ case OGT:
+ op, a, b = OLT, b, a
+ }
+ if n.Left.Type.IsFloat() {
+ // float comparison
+ return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
+ }
+ // integer comparison
+ return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[TBOOL], a, b)
+ case OMUL:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ if n.Type.IsComplex() {
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ pt := floatForComplex(n.Type) // Could be Float32 or Float64
+ wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+
+ return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ }
+
+ if n.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ }
+
+ return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+
+ case ODIV:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ if n.Type.IsComplex() {
+ // TODO this is not executed because the front-end substitutes a runtime call.
+ // That probably ought to change; with modest optimization the widen/narrow
+ // conversions could all be elided in larger expression trees.
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ divop := ssa.OpDiv64F
+ pt := floatForComplex(n.Type) // Could be Float32 or Float64
+ wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
+ xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
+
+ // TODO not sure if this is best done in wide precision or narrow
+ // Double-rounding might be an issue.
+ // Note that the pre-SSA implementation does the entire calculation
+ // in wide format, so wide is compatible.
+ xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
+ ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+ return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ }
+ if n.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ }
+ return s.intDivide(n, a, b)
+ case OMOD:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ return s.intDivide(n, a, b)
+ case OADD, OSUB:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ if n.Type.IsComplex() {
+ pt := floatForComplex(n.Type)
+ op := s.ssaOp(n.Op, pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type,
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+ }
+ if n.Type.IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ case OAND, OOR, OXOR:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ case OANDNOT:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ b = s.newValue1(s.ssaOp(OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(OAND, n.Type), a.Type, a, b)
+ case OLSH, ORSH:
+ a := s.expr(n.Left)
+ b := s.expr(n.Right)
+ bt := b.Type
+ if bt.IsSigned() {
+ cmp := s.newValue2(s.ssaOp(OLE, bt), types.Types[TBOOL], s.zeroVal(bt), b)
+ s.check(cmp, panicshift)
+ bt = bt.ToUnsigned()
+ }
+ return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
+ case OANDAND, OOROR:
+ // To implement OANDAND (and OOROR), we introduce a
+ // new temporary variable to hold the result. The
+ // variable is associated with the OANDAND node in the
+ // s.vars table (normally variables are only
+ // associated with ONAME nodes). We convert
+ // A && B
+ // to
+ // var = A
+ // if var {
+ // var = B
+ // }
+ // Using var in the subsequent block introduces the
+ // necessary phi variable.
+ el := s.expr(n.Left)
+ s.vars[n] = el
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(el)
+ // In theory, we should set b.Likely here based on context.
+ // However, gc only gives us likeliness hints
+ // in a single place, for plain OIF statements,
+ // and passing around context is finicky, so don't bother for now.
+
+ bRight := s.f.NewBlock(ssa.BlockPlain)
+ bResult := s.f.NewBlock(ssa.BlockPlain)
+ if n.Op == OANDAND {
+ b.AddEdgeTo(bRight)
+ b.AddEdgeTo(bResult)
+ } else if n.Op == OOROR {
+ b.AddEdgeTo(bResult)
+ b.AddEdgeTo(bRight)
+ }
+
+ s.startBlock(bRight)
+ er := s.expr(n.Right)
+ s.vars[n] = er
+
+ b = s.endBlock()
+ b.AddEdgeTo(bResult)
+
+ s.startBlock(bResult)
+ return s.variable(n, types.Types[TBOOL])
+ case OCOMPLEX:
+ r := s.expr(n.Left)
+ i := s.expr(n.Right)
+ return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
+
+ // unary ops
+ case ONEG:
+ a := s.expr(n.Left)
+ if n.Type.IsComplex() {
+ tp := floatForComplex(n.Type)
+ negop := s.ssaOp(n.Op, tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type,
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
+ }
+ return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
+ case ONOT, OBITNOT:
+ a := s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
+ case OIMAG, OREAL:
+ a := s.expr(n.Left)
+ return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
+ case OPLUS:
+ return s.expr(n.Left)
+
+ case OADDR:
+ return s.addr(n.Left)
+
+ case ORESULT:
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+ // Do the old thing
+ addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
+ return s.rawLoad(n.Type, addr)
+ }
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ if which == -1 {
+ // Do the old thing // TODO: Panic instead.
+ addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
+ return s.rawLoad(n.Type, addr)
+ }
+ if canSSAType(n.Type) {
+ return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
+ } else {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
+ return s.rawLoad(n.Type, addr)
+ }
+
+ case ODEREF:
+ p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
+ return s.load(n.Type, p)
+
+ case ODOT:
+ if n.Left.Op == OSTRUCTLIT {
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !isZero(n.Left) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
+ }
+ return s.zeroVal(n.Type)
+ }
+ // If n is addressable and can't be represented in
+ // SSA, then load just the selected field. This
+ // prevents false memory dependencies in race/msan
+ // instrumentation.
+ if islvalue(n) && !s.canSSA(n) {
+ p := s.addr(n)
+ return s.load(n.Type, p)
+ }
+ v := s.expr(n.Left)
+ return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
+
+ case ODOTPTR:
+ p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
+ return s.load(n.Type, p)
+
+ case OINDEX:
+ switch {
+ case n.Left.Type.IsString():
+ if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
+ // Replace "abc"[1] with 'b'.
+ // Delayed until now because "abc"[1] is not an ideal constant.
+ // See test/fixedbugs/issue11370.go.
+ return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
+ }
+ a := s.expr(n.Left)
+ i := s.expr(n.Right)
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ ptrtyp := s.f.Config.Types.BytePtr
+ ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
+ if Isconst(n.Right, CTINT) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
+ } else {
+ ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+ }
+ return s.load(types.Types[TUINT8], ptr)
+ case n.Left.Type.IsSlice():
+ p := s.addr(n)
+ return s.load(n.Left.Type.Elem(), p)
+ case n.Left.Type.IsArray():
+ if canSSAType(n.Left.Type) {
+ // SSA can handle arrays of length at most 1.
+ bound := n.Left.Type.NumElem()
+ a := s.expr(n.Left)
+ i := s.expr(n.Right)
+ if bound == 0 {
+ // Bounds check will never succeed. Might as well
+ // use constants for the bounds check.
+ z := s.constInt(types.Types[TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ // The return value won't be live, return junk.
+ return s.newValue0(ssa.OpUnknown, n.Type)
+ }
+ len := s.constInt(types.Types[TINT], bound)
+ s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
+ return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
+ }
+ p := s.addr(n)
+ return s.load(n.Left.Type.Elem(), p)
+ default:
+ s.Fatalf("bad type for index %v", n.Left.Type)
+ return nil
+ }
+
+ case OLEN, OCAP:
+ switch {
+ case n.Left.Type.IsSlice():
+ op := ssa.OpSliceLen
+ if n.Op == OCAP {
+ op = ssa.OpSliceCap
+ }
+ return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
+ case n.Left.Type.IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
+ case n.Left.Type.IsMap(), n.Left.Type.IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.Left))
+ default: // array
+ return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ }
+
+ case OSPTR:
+ a := s.expr(n.Left)
+ if n.Left.Type.IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type, a)
+ } else {
+ return s.newValue1(ssa.OpStringPtr, n.Type, a)
+ }
+
+ case OITAB:
+ a := s.expr(n.Left)
+ return s.newValue1(ssa.OpITab, n.Type, a)
+
+ case OIDATA:
+ a := s.expr(n.Left)
+ return s.newValue1(ssa.OpIData, n.Type, a)
+
+ case OEFACE:
+ tab := s.expr(n.Left)
+ data := s.expr(n.Right)
+ return s.newValue2(ssa.OpIMake, n.Type, tab, data)
+
+ case OSLICEHEADER:
+ p := s.expr(n.Left)
+ l := s.expr(n.List.First())
+ c := s.expr(n.List.Second())
+ return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+
+ case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
+ v := s.expr(n.Left)
+ var i, j, k *ssa.Value
+ low, high, max := n.SliceBounds()
+ if low != nil {
+ i = s.expr(low)
+ }
+ if high != nil {
+ j = s.expr(high)
+ }
+ if max != nil {
+ k = s.expr(max)
+ }
+ p, l, c := s.slice(v, i, j, k, n.Bounded())
+ return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+
+ case OSLICESTR:
+ v := s.expr(n.Left)
+ var i, j *ssa.Value
+ low, high, _ := n.SliceBounds()
+ if low != nil {
+ i = s.expr(low)
+ }
+ if high != nil {
+ j = s.expr(high)
+ }
+ p, l, _ := s.slice(v, i, j, nil, n.Bounded())
+ return s.newValue2(ssa.OpStringMake, n.Type, p, l)
+
+ case OCALLFUNC:
+ if isIntrinsicCall(n) {
+ return s.intrinsicCall(n)
+ }
+ fallthrough
+
+ case OCALLINTER, OCALLMETH:
+ return s.callResult(n, callNormal)
+
+ case OGETG:
+ return s.newValue1(ssa.OpGetG, n.Type, s.mem())
+
+ case OAPPEND:
+ return s.append(n, false)
+
+ case OSTRUCTLIT, OARRAYLIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !isZero(n) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n)
+ }
+ return s.zeroVal(n.Type)
+
+ case ONEWOBJ:
+ if n.Type.Elem().Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
+ }
+ typ := s.expr(n.Left)
+ vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
+ return vv[0]
+
+ default:
+ s.Fatalf("unhandled expr %v", n.Op)
+ return nil
+ }
+}
+
+// append converts an OAPPEND node to SSA.
+// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
+// adds it to s, and returns the Value.
+// If inplace is true, it writes the result of the OAPPEND expression n
+// back to the slice being appended to, and returns nil.
+// inplace MUST be set to false if the slice can be SSA'd.
+func (s *state) append(n *Node, inplace bool) *ssa.Value {
+ // If inplace is false, process as expression "append(s, e1, e2, e3)":
+ //
+ // ptr, len, cap := s
+ // newlen := len + 3
+ // if newlen > cap {
+ // ptr, len, cap = growslice(s, newlen)
+ // newlen = len + 3 // recalculate to avoid a spill
+ // }
+ // // with write barriers, if needed:
+ // *(ptr+len) = e1
+ // *(ptr+len+1) = e2
+ // *(ptr+len+2) = e3
+ // return makeslice(ptr, newlen, cap)
+ //
+ //
+ // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
+ //
+ // a := &s
+ // ptr, len, cap := s
+ // newlen := len + 3
+ // if uint(newlen) > uint(cap) {
+ // newptr, len, newcap = growslice(ptr, len, cap, newlen)
+ // vardef(a) // if necessary, advise liveness we are writing a new a
+ // *a.cap = newcap // write before ptr to avoid a spill
+ // *a.ptr = newptr // with write barrier
+ // }
+ // newlen = len + 3 // recalculate to avoid a spill
+ // *a.len = newlen
+ // // with write barriers, if needed:
+ // *(ptr+len) = e1
+ // *(ptr+len+1) = e2
+ // *(ptr+len+2) = e3
+
+ et := n.Type.Elem()
+ pt := types.NewPtr(et)
+
+ // Evaluate slice
+ sn := n.List.First() // the slice node is the first in the list
+
+ var slice, addr *ssa.Value
+ if inplace {
+ addr = s.addr(sn)
+ slice = s.load(n.Type, addr)
+ } else {
+ slice = s.expr(sn)
+ }
+
+ // Allocate new blocks
+ grow := s.f.NewBlock(ssa.BlockPlain)
+ assign := s.f.NewBlock(ssa.BlockPlain)
+
+ // Decide if we need to grow
+ nargs := int64(n.List.Len() - 1)
+ p := s.newValue1(ssa.OpSlicePtr, pt, slice)
+ l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
+ nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+
+ cmp := s.newValue2(s.ssaOp(OLT, types.Types[TUINT]), types.Types[TBOOL], c, nl)
+ s.vars[&ptrVar] = p
+
+ if !inplace {
+ s.vars[&newlenVar] = nl
+ s.vars[&capVar] = c
+ } else {
+ s.vars[&lenVar] = l
+ }
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Likely = ssa.BranchUnlikely
+ b.SetControl(cmp)
+ b.AddEdgeTo(grow)
+ b.AddEdgeTo(assign)
+
+ // Call growslice
+ s.startBlock(grow)
+ taddr := s.expr(n.Left)
+ r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
+
+ if inplace {
+ if sn.Op == ONAME && sn.Class() != PEXTERN {
+ // Tell liveness we're about to build a new slice
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ }
+ capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceCapOffset, addr)
+ s.store(types.Types[TINT], capaddr, r[2])
+ s.store(pt, addr, r[0])
+ // load the value we just stored to avoid having to spill it
+ s.vars[&ptrVar] = s.load(pt, addr)
+ s.vars[&lenVar] = r[1] // avoid a spill in the fast path
+ } else {
+ s.vars[&ptrVar] = r[0]
+ s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
+ s.vars[&capVar] = r[2]
+ }
+
+ b = s.endBlock()
+ b.AddEdgeTo(assign)
+
+ // assign new elements to slots
+ s.startBlock(assign)
+
+ if inplace {
+ l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
+ lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, sliceLenOffset, addr)
+ s.store(types.Types[TINT], lenaddr, nl)
+ }
+
+ // Evaluate args
+ type argRec struct {
+ // if store is true, we're appending the value v. If false, we're appending the
+ // value at *v.
+ v *ssa.Value
+ store bool
+ }
+ args := make([]argRec, 0, nargs)
+ for _, n := range n.List.Slice()[1:] {
+ if canSSAType(n.Type) {
+ args = append(args, argRec{v: s.expr(n), store: true})
+ } else {
+ v := s.addr(n)
+ args = append(args, argRec{v: v})
+ }
+ }
+
+ p = s.variable(&ptrVar, pt) // generates phi for ptr
+ if !inplace {
+ nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
+ c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
+ }
+ p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
+ for i, arg := range args {
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
+ if arg.store {
+ s.storeType(et, addr, arg.v, 0, true)
+ } else {
+ s.move(et, addr, arg.v)
+ }
+ }
+
+ delete(s.vars, &ptrVar)
+ if inplace {
+ delete(s.vars, &lenVar)
+ return nil
+ }
+ delete(s.vars, &newlenVar)
+ delete(s.vars, &capVar)
+ // make result
+ return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
+}
+
+// condBranch evaluates the boolean expression cond and branches to yes
+// if cond is true and no if cond is false.
+// This function is intended to handle && and || better than just calling
+// s.expr(cond) and branching on the result.
+func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op {
+ case OANDAND:
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Ninit)
+ s.condBranch(cond.Left, mid, no, max8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Right, yes, no, likely)
+ return
+ // Note: if likely==1, then both recursive calls pass 1.
+ // If likely==-1, then we don't have enough information to decide
+ // whether the first branch is likely or not. So we pass 0 for
+ // the likeliness of the first branch.
+ // TODO: have the frontend give us branch prediction hints for
+ // OANDAND and OOROR nodes (if it ever has such info).
+ case OOROR:
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Ninit)
+ s.condBranch(cond.Left, yes, mid, min8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Right, yes, no, likely)
+ return
+ // Note: if likely==-1, then both recursive calls pass -1.
+ // If likely==1, then we don't have enough info to decide
+ // the likelihood of the first branch.
+ case ONOT:
+ s.stmtList(cond.Ninit)
+ s.condBranch(cond.Left, no, yes, -likely)
+ return
+ }
+ c := s.expr(cond)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(c)
+ b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
+ b.AddEdgeTo(yes)
+ b.AddEdgeTo(no)
+}
+
+type skipMask uint8
+
+const (
+ skipPtr skipMask = 1 << iota
+ skipLen
+ skipCap
+)
+
+// assign does left = right.
+// Right has already been evaluated to ssa, left has not.
+// If deref is true, then we do left = *right instead (and right has already been nil-checked).
+// If deref is true and right == nil, just do left = 0.
+// skip indicates assignments (at the top level) that can be avoided.
+func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
+ if left.Op == ONAME && left.isBlank() {
+ return
+ }
+ t := left.Type
+ dowidth(t)
+ if s.canSSA(left) {
+ if deref {
+ s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
+ }
+ if left.Op == ODOT {
+ // We're assigning to a field of an ssa-able value.
+ // We need to build a new structure with the new value for the
+ // field we're assigning and the old values for the other fields.
+ // For instance:
+ // type T struct {a, b, c int}
+ // var x T
+ // x.b = 5
+ // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
+
+ // Grab information about the structure type.
+ t := left.Left.Type
+ nf := t.NumFields()
+ idx := fieldIdx(left)
+
+ // Grab old value of structure.
+ old := s.expr(left.Left)
+
+ // Make new structure.
+ new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
+
+ // Add fields as args.
+ for i := 0; i < nf; i++ {
+ if i == idx {
+ new.AddArg(right)
+ } else {
+ new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
+ }
+ }
+
+ // Recursively assign the new value we've made to the base of the dot op.
+ s.assign(left.Left, new, false, 0)
+ // TODO: do we need to update named values here?
+ return
+ }
+ if left.Op == OINDEX && left.Left.Type.IsArray() {
+ s.pushLine(left.Pos)
+ defer s.popLine()
+ // We're assigning to an element of an ssa-able array.
+ // a[i] = v
+ t := left.Left.Type
+ n := t.NumElem()
+
+ i := s.expr(left.Right) // index
+ if n == 0 {
+ // The bounds check must fail. Might as well
+ // ignore the actual index and just use zeros.
+ z := s.constInt(types.Types[TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ return
+ }
+ if n != 1 {
+ s.Fatalf("assigning to non-1-length array")
+ }
+ // Rewrite to a = [1]{v}
+ len := s.constInt(types.Types[TINT], 1)
+ s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
+ v := s.newValue1(ssa.OpArrayMake1, t, right)
+ s.assign(left.Left, v, false, 0)
+ return
+ }
+ // Update variable assignment.
+ s.vars[left] = right
+ s.addNamedValue(left, right)
+ return
+ }
+
+ // If this assignment clobbers an entire local variable, then emit
+ // OpVarDef so liveness analysis knows the variable is redefined.
+ if base := clobberBase(left); base.Op == ONAME && base.Class() != PEXTERN && skip == 0 {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !base.IsAutoTmp())
+ }
+
+ // Left is not ssa-able. Compute its address.
+ addr := s.addr(left)
+ if isReflectHeaderDataField(left) {
+ // Package unsafe's documentation says storing pointers into
+ // reflect.SliceHeader and reflect.StringHeader's Data fields
+ // is valid, even though they have type uintptr (#19168).
+ // Mark it pointer type to signal the writebarrier pass to
+ // insert a write barrier.
+ t = types.Types[TUNSAFEPTR]
+ }
+ if deref {
+ // Treat as a mem->mem move.
+ if right == nil {
+ s.zero(t, addr)
+ } else {
+ s.move(t, addr, right)
+ }
+ return
+ }
+ // Treat as a store.
+ s.storeType(t, addr, right, skip, !left.IsAutoTmp())
+}
+
+// zeroVal returns the zero value for type t.
+func (s *state) zeroVal(t *types.Type) *ssa.Value {
+ switch {
+ case t.IsInteger():
+ switch t.Size() {
+ case 1:
+ return s.constInt8(t, 0)
+ case 2:
+ return s.constInt16(t, 0)
+ case 4:
+ return s.constInt32(t, 0)
+ case 8:
+ return s.constInt64(t, 0)
+ default:
+ s.Fatalf("bad sized integer type %v", t)
+ }
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ return s.constFloat32(t, 0)
+ case 8:
+ return s.constFloat64(t, 0)
+ default:
+ s.Fatalf("bad sized float type %v", t)
+ }
+ case t.IsComplex():
+ switch t.Size() {
+ case 8:
+ z := s.constFloat32(types.Types[TFLOAT32], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ case 16:
+ z := s.constFloat64(types.Types[TFLOAT64], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ default:
+ s.Fatalf("bad sized complex type %v", t)
+ }
+
+ case t.IsString():
+ return s.constEmptyString(t)
+ case t.IsPtrShaped():
+ return s.constNil(t)
+ case t.IsBoolean():
+ return s.constBool(false)
+ case t.IsInterface():
+ return s.constInterface(t)
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsStruct():
+ n := t.NumFields()
+ v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
+ for i := 0; i < n; i++ {
+ v.AddArg(s.zeroVal(t.FieldType(i)))
+ }
+ return v
+ case t.IsArray():
+ switch t.NumElem() {
+ case 0:
+ return s.entryNewValue0(ssa.OpArrayMake0, t)
+ case 1:
+ return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
+ }
+ }
+ s.Fatalf("zero for type %v not implemented", t)
+ return nil
+}
+
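+// callKind describes how a call should be generated: as a normal call, a
+// deferred call, a stack-allocated deferred call, or a go statement.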
+type callKind int8
+
+const (
+ callNormal callKind = iota
+ callDefer
+ callDeferStack
+ callGo
+)
+
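+// sfRtCallDef describes a soft-float runtime routine: the function's symbol and the etype of its result.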
+type sfRtCallDef struct {
+ rtfn *obj.LSym
+ rtype types.EType
+}
+
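+// softFloatOps maps floating-point SSA ops to the runtime routines used to
+// implement them when compiling with soft-float.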
+var softFloatOps map[ssa.Op]sfRtCallDef
+
+func softfloatInit() {
+ // Some of these operations get transformed by sfcall.
+ softFloatOps = map[ssa.Op]sfRtCallDef{
+ ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
+ ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL},
+ ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
+ ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
+ }
+}
+
+// TODO: do not emit sfcall if operation can be optimized to constant in later
+// opt phase
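+// sfcall rewrites the floating-point op as a call to its soft-float runtime
+// routine, if one exists, returning the call's result and reporting whether
+// the rewrite applied.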
+func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
+ if callDef, ok := softFloatOps[op]; ok {
+ switch op {
+ case ssa.OpLess32F,
+ ssa.OpLess64F,
+ ssa.OpLeq32F,
+ ssa.OpLeq64F:
+ args[0], args[1] = args[1], args[0]
+ case ssa.OpSub32F,
+ ssa.OpSub64F:
+ args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ }
+
+ result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
+ if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
+ result = s.newValue1(ssa.OpNot, result.Type, result)
+ }
+ return result, true
+ }
+ return nil, false
+}
+
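+// intrinsics maps (arch, package, function) to the builder that expands calls to that function inline.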
+var intrinsics map[intrinsicKey]intrinsicBuilder
+
+// An intrinsicBuilder converts a call node n into an ssa value that
+// implements that call as an intrinsic. args is a list of arguments to the func.
+type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
+
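+// intrinsicKey identifies an intrinsic by target architecture, package path, and function name.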
+type intrinsicKey struct {
+ arch *sys.Arch
+ pkg string
+ fn string
+}
+
+func init() {
+ intrinsics = map[intrinsicKey]intrinsicBuilder{}
+
+ var all []*sys.Arch
+ var p4 []*sys.Arch
+ var p8 []*sys.Arch
+ var lwatomics []*sys.Arch
+ for _, a := range &sys.Archs {
+ all = append(all, a)
+ if a.PtrSize == 4 {
+ p4 = append(p4, a)
+ } else {
+ p8 = append(p8, a)
+ }
+ if a.Family != sys.PPC64 {
+ lwatomics = append(lwatomics, a)
+ }
+ }
+
+ // add adds the intrinsic b for pkg.fn for the given list of architectures.
+ add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
+ for _, a := range archs {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ // addF does the same as add but operates on architecture families.
+ addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
+ m := 0
+ for _, f := range archFamilies {
+ if f >= 32 {
+ panic("too many architecture families")
+ }
+ m |= 1 << uint(f)
+ }
+ for _, a := range all {
+ if m>>uint(a.Family)&1 != 0 {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ }
+ // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
+ alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
+ aliased := false
+ for _, a := range archs {
+ if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ aliased = true
+ }
+ }
+ if !aliased {
+ panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
+ }
+ }
+
+ /******** runtime ********/
+ if !instrumenting {
+ add("runtime", "slicebytetostringtmp",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ // Compiler frontend optimizations emit OBYTES2STRTMP nodes
+ // for the backend instead of slicebytetostringtmp calls
+ // when not instrumenting.
+ return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
+ },
+ all...)
+ }
+ addF("runtime/internal/math", "MulUintptr",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ }
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
+ },
+ sys.AMD64, sys.I386, sys.MIPS64)
+ add("runtime", "KeepAlive",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
+ s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ return nil
+ },
+ all...)
+ add("runtime", "getclosureptr",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallerpc",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallersp",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ /******** runtime/internal/sys ********/
+ addF("runtime/internal/sys", "Ctz32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+ addF("runtime/internal/sys", "Ctz64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+ addF("runtime/internal/sys", "Bswap32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+ addF("runtime/internal/sys", "Bswap64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+
+ /******** runtime/internal/atomic ********/
+ addF("runtime/internal/atomic", "Load",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.PPC64)
+ addF("runtime/internal/atomic", "Loadp",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Store",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StorepNoWB",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
+
+ addF("runtime/internal/atomic", "Xchg",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xchg64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ type atomicOpEmitter func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+
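+	// makeAtomicGuardedIntrinsicARM64 wraps an atomic op in a run-time check of
+	// arm64HasATOMICS: it emits op1 when the CPU's atomic extensions are available
+	// and falls back to the original sequence op0 otherwise.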
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
+
+ return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+			// Whether the target supports the atomic extensions is detected dynamically at run time.
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
+ v := s.load(types.Types[TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+			// We have the atomic instructions - use them directly.
+ s.startBlock(bTrue)
+ emit(s, n, args, op1, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Use original instruction sequence.
+ s.startBlock(bFalse)
+ emit(s, n, args, op0, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ if rtyp == TNIL {
+ return nil
+ } else {
+ return s.variable(n, types.Types[rtyp])
+ }
+ }
+ }
+
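+	// atomicXchgXaddEmitterARM64 emits an exchange or add op and records its
+	// result in s.vars[n] so the guarded intrinsic above can merge it.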
+ atomicXchgXaddEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+ addF("runtime/internal/atomic", "Xchg",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xchg64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Xadd",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xadd64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Xadd",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32, TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xadd64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64, TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Cas",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Cas64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "CasRel",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
+ },
+ sys.PPC64)
+
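+	// atomicCasEmitterARM64 emits a compare-and-swap op and records its boolean
+	// result in s.vars[n].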
+ atomicCasEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ v := s.newValue4(op, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+
+ addF("runtime/internal/atomic", "Cas",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, TUINT32, TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Cas64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, TUINT64, TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "And8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "And",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "Or8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "Or",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
+
+ atomicAndOrEmitterARM64 := func(s *state, n *Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ s.vars[&memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ }
+
+ addF("runtime/internal/atomic", "And8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "And",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, TNIL, TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+
+ alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
+ alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
+ alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
+
+ /******** math ********/
+ addF("math", "Sqrt",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
+ },
+ sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
+ addF("math", "Trunc",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Ceil",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Floor",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Round",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X)
+ addF("math", "RoundToEven",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math", "Abs",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
+ addF("math", "Copysign",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
+ },
+ sys.PPC64, sys.Wasm)
+ addF("math", "FMA",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X)
+ addF("math", "FMA",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[TFLOAT64])
+ }
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // Haswell and newer CPUs are common
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[TFLOAT64])
+ },
+ sys.AMD64)
+ addF("math", "FMA",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[TFLOAT64])
+ }
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
+ v := s.load(types.Types[TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[TFLOAT64])
+ },
+ sys.ARM)
+
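+	// makeRoundAMD64 guards a rounding op with a run-time SSE4.1 check; when the
+	// instruction is unavailable, the pure Go version is called instead.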
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasSSE41)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // most machines have SSE4.1 nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[TFLOAT64])
+ }
+ }
+ addF("math", "RoundToEven",
+ makeRoundAMD64(ssa.OpRoundToEven),
+ sys.AMD64)
+ addF("math", "Floor",
+ makeRoundAMD64(ssa.OpFloor),
+ sys.AMD64)
+ addF("math", "Ceil",
+ makeRoundAMD64(ssa.OpCeil),
+ sys.AMD64)
+ addF("math", "Trunc",
+ makeRoundAMD64(ssa.OpTrunc),
+ sys.AMD64)
+
+ /******** math/bits ********/
+ addF("math/bits", "TrailingZeros64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
+ c := s.constInt32(types.Types[TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
+ c := s.constInt64(types.Types[TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ },
+ sys.S390X, sys.PPC64)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
+ c := s.constInt32(types.Types[TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
+ c := s.constInt64(types.Types[TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
+ },
+ sys.S390X)
+ alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
+ alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
+ // ReverseBytes inlines correctly, no need to intrinsify it.
+ // ReverseBytes16 lowers to a rotate, no need for anything special here.
+ addF("math/bits", "Len64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64)
+ addF("math/bits", "Len32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ }
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ },
+ sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
+ }
+ return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ // LeadingZeros is handled because it trivially calls Len.
+ addF("math/bits", "Reverse64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
+ }
+ return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "RotateLeft8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "RotateLeft64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
+
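+	// makeOnesCountAMD64 guards a population-count op with a run-time POPCNT
+	// check; when the instruction is unavailable, the pure Go version is called instead.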
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasPOPCNT)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+			b.Likely = ssa.BranchLikely // most machines have POPCNT nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ op := op64
+ if s.config.PtrSize == 4 {
+ op = op32
+ }
+ s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[TINT])
+ }
+ }
+ addF("math/bits", "OnesCount64",
+ makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
+ sys.AMD64)
+ addF("math/bits", "OnesCount64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount32",
+ makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
+ sys.AMD64)
+ addF("math/bits", "OnesCount32",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount16",
+ makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
+ sys.AMD64)
+ addF("math/bits", "OnesCount16",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
+ },
+ sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount",
+ makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
+ sys.AMD64)
+ addF("math/bits", "Mul64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
+ alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
+ addF("math/bits", "Add64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
+ alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
+ addF("math/bits", "Sub64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.S390X)
+ alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
+ addF("math/bits", "Div64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+			// Check for divide-by-zero and overflow, and panic with the appropriate message.
+ cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
+ s.check(cmpZero, panicdivide)
+ cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
+ s.check(cmpOverflow, panicoverflow)
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64)
+ alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
+
+ alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
+ alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
+ alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
+ alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
+ alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
+ alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
+
+ /******** sync/atomic ********/
+
+ // Note: these are disabled by flag_race in findIntrinsic below.
+ alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
+ alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
+
+ alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
+ // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
+ alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
+
+ alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+ alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
+
+ alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+ /******** math/big ********/
+ add("math/big", "mulWW",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ },
+ sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
+}
+
+// findIntrinsic returns a function that builds the SSA equivalent of the
+// function identified by the symbol sym. If sym does not correspond to an intrinsic, it returns nil.
+func findIntrinsic(sym *types.Sym) intrinsicBuilder {
+ if sym == nil || sym.Pkg == nil {
+ return nil
+ }
+ pkg := sym.Pkg.Path
+ if sym.Pkg == localpkg {
+ pkg = myimportpath
+ }
+ if flag_race && pkg == "sync/atomic" {
+ // The race detector needs to be able to intercept these calls.
+ // We can't intrinsify them.
+ return nil
+ }
+	// Skip intrinsifying math functions (which may contain hard-float
+	// instructions) when compiling in soft-float mode.
+ if thearch.SoftFloat && pkg == "math" {
+ return nil
+ }
+
+ fn := sym.Name
+ if ssa.IntrinsicsDisable {
+ if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
+			// These runtime functions don't have definitions; they must be intrinsics.
+ } else {
+ return nil
+ }
+ }
+ return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
+}
+
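+// isIntrinsicCall reports whether n is a call that will be replaced by an intrinsic.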
+func isIntrinsicCall(n *Node) bool {
+ if n == nil || n.Left == nil {
+ return false
+ }
+ return findIntrinsic(n.Left.Sym) != nil
+}
+
+// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
+func (s *state) intrinsicCall(n *Node) *ssa.Value {
+ v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
+ if ssa.IntrinsicsDebug > 0 {
+ x := v
+ if x == nil {
+ x = s.mem()
+ }
+ if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
+ x = x.Args[0]
+ }
+ Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+ }
+ return v
+}
+
+// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
+func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
+ // Construct map of temps; see comments in s.call about the structure of n.
+ temps := map[*Node]*ssa.Value{}
+ for _, a := range n.List.Slice() {
+ if a.Op != OAS {
+ s.Fatalf("non-assignment as a temp function argument %v", a.Op)
+ }
+ l, r := a.Left, a.Right
+ if l.Op != ONAME {
+ s.Fatalf("non-ONAME temp function argument %v", a.Op)
+ }
+ // Evaluate and store to "temporary".
+ // Walk ensures these temporaries are dead outside of n.
+ temps[l] = s.expr(r)
+ }
+ args := make([]*ssa.Value, n.Rlist.Len())
+ for i, n := range n.Rlist.Slice() {
+ // Store a value to an argument slot.
+ if x, ok := temps[n]; ok {
+ // This is a previously computed temporary.
+ args[i] = x
+ continue
+ }
+ // This is an explicit value; evaluate it.
+ args[i] = s.expr(n)
+ }
+ return args
+}
+
+// openDeferRecord adds code to evaluate and store the args for an open-coded defer
+// call, and records info about the defer, so we can generate the proper code on the
+// exit paths. n is the sub-node of the defer node that is the actual function
+// call. We also record funcdata information on where the args are stored
+// (as well as the deferBits variable), which enables us to run the proper
+// defer calls during panics.
+func (s *state) openDeferRecord(n *Node) {
+ // Do any needed expression evaluation for the args (including the
+ // receiver, if any). This may be evaluating something like 'autotmp_3 =
+ // once.mutex'. Such a statement will create a mapping in s.vars[] from
+ // the autotmp name to the evaluated SSA arg value, but won't do any
+ // stores to the stack.
+ s.stmtList(n.List)
+
+ var args []*ssa.Value
+ var argNodes []*Node
+
+ opendefer := &openDeferInfo{
+ n: n,
+ }
+ fn := n.Left
+ if n.Op == OCALLFUNC {
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(nil, fn.Type, closureVal)
+ opendefer.closureNode = closure.Aux.(*Node)
+ if !(fn.Op == ONAME && fn.Class() == PFUNC) {
+ opendefer.closure = closure
+ }
+ } else if n.Op == OCALLMETH {
+ if fn.Op != ODOTMETH {
+ Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
+ }
+ closureVal := s.getMethodClosure(fn)
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the method directly.
+ closure := s.openDeferSave(nil, fn.Type, closureVal)
+ opendefer.closureNode = closure.Aux.(*Node)
+ } else {
+ if fn.Op != ODOTINTER {
+ Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ }
+ closure, rcvr := s.getClosureAndRcvr(fn)
+ opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
+ // Important to get the receiver type correct, so it is recognized
+ // as a pointer for GC purposes.
+ opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
+ opendefer.closureNode = opendefer.closure.Aux.(*Node)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(*Node)
+ }
+ for _, argn := range n.Rlist.Slice() {
+ var v *ssa.Value
+ if canSSAType(argn.Type) {
+ v = s.openDeferSave(nil, argn.Type, s.expr(argn))
+ } else {
+ v = s.openDeferSave(argn, argn.Type, nil)
+ }
+ args = append(args, v)
+ argNodes = append(argNodes, v.Aux.(*Node))
+ }
+ opendefer.argVals = args
+ opendefer.argNodes = argNodes
+ index := len(s.openDefers)
+ s.openDefers = append(s.openDefers, opendefer)
+
+ // Update deferBits only after evaluation and storage to stack of
+ // args/receiver/interface is successful.
+ bitvalue := s.constInt8(types.Types[TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[TUINT8], s.variable(&deferBitsVar, types.Types[TUINT8]), bitvalue)
+ s.vars[&deferBitsVar] = newDeferBits
+ s.store(types.Types[TUINT8], s.deferBitsAddr, newDeferBits)
+}
+
+// openDeferSave generates SSA nodes to store a value (with type t) for an
+// open-coded defer at an explicit autotmp location on the stack, so it can be
+// reloaded and used for the appropriate call on exit. If type t is SSAable, then
+// val must be non-nil (and n should be nil) and val is the value to be stored. If
+// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
+// evaluated (via s.addr() below) to get the value that is to be stored. The
+// function returns an SSA value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value {
+ canSSA := canSSAType(t)
+ var pos src.XPos
+ if canSSA {
+ pos = val.Pos
+ } else {
+ pos = n.Pos
+ }
+ argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
+ argTemp.Name.SetOpenDeferSlot(true)
+ var addrArgTemp *ssa.Value
+ // Use OpVarLive to make sure stack slots for the args, etc. are not
+ // removed by dead-store elimination
+ if s.curBlock.ID != s.f.Entry.ID {
+ // Force the argtmp storing this defer function/receiver/arg to be
+ // declared in the entry block, so that it will be live for the
+ // defer exit code (which will actually access it only if the
+ // associated defer call has been activated).
+ s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
+ s.defvars[s.f.Entry.ID][&memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][&memVar])
+ addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][&memVar])
+ } else {
+ // Special case if we're still in the entry block. We can't use
+ // the above code, since s.defvars[s.f.Entry.ID] isn't defined
+ // until we end the entry block with s.endBlock().
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
+ addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
+ }
+ if t.HasPointers() {
+ // Since we may use this argTemp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ argTemp.Name.SetNeedzero(true)
+ }
+ if !canSSA {
+ a := s.addr(n)
+ s.move(t, addrArgTemp, a)
+ return addrArgTemp
+ }
+ // We are storing to the stack, hence we can avoid the full checks in
+ // storeType() (no write barrier) and do a simple store().
+ s.store(t, addrArgTemp, val)
+ return addrArgTemp
+}
+
+// openDeferExit generates SSA for processing all the open-coded defers at exit.
+// The code involves loading deferBits, and checking each of the bits to see if
+// the corresponding defer statement was executed. For each bit that is turned
+// on, the associated defer call is made.
+func (s *state) openDeferExit() {
+ deferExit := s.f.NewBlock(ssa.BlockPlain)
+ s.endBlock().AddEdgeTo(deferExit)
+ s.startBlock(deferExit)
+ s.lastDeferExit = deferExit
+ s.lastDeferCount = len(s.openDefers)
+ zeroval := s.constInt8(types.Types[TUINT8], 0)
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
+ // Test for and run defers in reverse order
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ deferBits := s.variable(&deferBitsVar, types.Types[TUINT8])
+ // Generate code to check if the bit associated with the current
+ // defer is set.
+ bitval := s.constInt8(types.Types[TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[TBOOL], andval, zeroval)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(eqVal)
+ b.AddEdgeTo(bEnd)
+ b.AddEdgeTo(bCond)
+ bCond.AddEdgeTo(bEnd)
+ s.startBlock(bCond)
+
+ // Clear this bit in deferBits and force store back to stack, so
+ // we will not try to re-run this defer call if this defer call panics.
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[TUINT8], deferBits, nbitval)
+ s.store(types.Types[TUINT8], s.deferBitsAddr, maskedval)
+ // Use this value for following tests, so we keep previous
+ // bits cleared.
+ s.vars[&deferBitsVar] = maskedval
+
+ // Generate code to call the function call of the defer, using the
+ // closure/receiver/args that were stored in argtmps at the point
+ // of the defer statement.
+ argStart := Ctxt.FixedFrameSize()
+ fn := r.n.Left
+ stksize := fn.Type.ArgWidth()
+ var ACArgs []ssa.Param
+ var ACResults []ssa.Param
+ var callArgs []*ssa.Value
+ if r.rcvr != nil {
+ // rcvr in case of OCALLINTER
+ v := s.load(r.rcvr.Type.Elem(), r.rcvr)
+ addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ if testLateExpansion {
+ callArgs = append(callArgs, v)
+ } else {
+ s.store(types.Types[TUINTPTR], addr, v)
+ }
+ }
+ for j, argAddrVal := range r.argVals {
+ f := getParam(r.n, j)
+ pt := types.NewPtr(f.Type)
+ ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
+ if testLateExpansion {
+ var a *ssa.Value
+ if !canSSAType(f.Type) {
+ a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
+ } else {
+ a = s.load(f.Type, argAddrVal)
+ }
+ callArgs = append(callArgs, a)
+ } else {
+ addr := s.constOffPtrSP(pt, argStart+f.Offset)
+ if !canSSAType(f.Type) {
+ s.move(f.Type, addr, argAddrVal)
+ } else {
+ argVal := s.load(f.Type, argAddrVal)
+ s.storeType(f.Type, addr, argVal, 0, false)
+ }
+ }
+ }
+ var call *ssa.Value
+ if r.closure != nil {
+ v := s.load(r.closure.Type.Elem(), r.closure)
+ s.maybeNilCheckClosure(v, callDefer)
+ codeptr := s.rawLoad(types.Types[TUINTPTR], v)
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
+ }
+ } else {
+ aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ // Do a static call if the original call was a static function or method
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
+ }
+ call.AuxInt = stksize
+ if testLateExpansion {
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ s.vars[&memVar] = call
+ }
+ // Make sure that the stack slots with pointers are kept live
+		// through the call (which is a preemption point). Also, we will
+ // use the first call of the last defer exit to compute liveness
+ // for the deferreturn, so we want all stack slots to be live.
+ if r.closureNode != nil {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ }
+ if r.rcvrNode != nil {
+ if r.rcvrNode.Type.HasPointers() {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
+ }
+ }
+ for _, argNode := range r.argNodes {
+ if argNode.Type.HasPointers() {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
+ }
+ }
+
+ s.endBlock()
+ s.startBlock(bEnd)
+ }
+}
+
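+// callResult generates SSA for the call n and returns the value of its result.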
+func (s *state) callResult(n *Node, k callKind) *ssa.Value {
+ return s.call(n, k, false)
+}
+
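+// callAddr generates SSA for the call n and returns the address of its result.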
+func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
+ return s.call(n, k, true)
+}
+
+// call generates SSA for calling the function n using the specified call type.
+// It returns the value of the call's result, or the address of the result if
+// returnResultAddr is true, or nil if the call has no result.
+func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
+ s.prevCall = nil
+ var sym *types.Sym // target symbol (if static)
+ var closure *ssa.Value // ptr to closure to run (if dynamic)
+ var codeptr *ssa.Value // ptr to target code (if dynamic)
+ var rcvr *ssa.Value // receiver to set
+ fn := n.Left
+ var ACArgs []ssa.Param
+ var ACResults []ssa.Param
+ var callArgs []*ssa.Value
+ res := n.Left.Type.Results()
+ if k == callNormal {
+ nf := res.NumFields()
+ for i := 0; i < nf; i++ {
+ fp := res.Field(i)
+ ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
+ }
+ }
+
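+	// With late call expansion, the call's arguments are carried as SSA values on
+	// the call itself and are lowered to stores by a later expansion pass.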
+ testLateExpansion := false
+
+ switch n.Op {
+ case OCALLFUNC:
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
+ if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
+ sym = fn.Sym
+ break
+ }
+ closure = s.expr(fn)
+ if k != callDefer && k != callDeferStack {
+			// A deferred nil function needs to panic when the function is invoked,
+			// not at the point of the defer statement.
+ s.maybeNilCheckClosure(closure, k)
+ }
+ case OCALLMETH:
+ if fn.Op != ODOTMETH {
+ s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
+ }
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
+ if k == callNormal {
+ sym = fn.Sym
+ break
+ }
+ closure = s.getMethodClosure(fn)
+ // Note: receiver is already present in n.Rlist, so we don't
+ // want to set it here.
+ case OCALLINTER:
+ if fn.Op != ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ }
+ testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
+ var iclosure *ssa.Value
+ iclosure, rcvr = s.getClosureAndRcvr(fn)
+ if k == callNormal {
+ codeptr = s.load(types.Types[TUINTPTR], iclosure)
+ } else {
+ closure = iclosure
+ }
+ }
+ dowidth(fn.Type)
+ stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+
+ // Run all assignments of temps.
+ // The temps are introduced to avoid overwriting argument
+ // slots when arguments themselves require function calls.
+ s.stmtList(n.List)
+
+ var call *ssa.Value
+ if k == callDeferStack {
+ testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
+ // Make a defer struct d on the stack.
+ t := deferstruct(stksize)
+ d := tempAt(n.Pos, s.curfn, t)
+
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+ addr := s.addr(d)
+
+ // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
+ // 0: siz
+ s.store(types.Types[TUINT32],
+ s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
+ s.constInt32(types.Types[TUINT32], int32(stksize)))
+ // 1: started, set in deferprocStack
+ // 2: heap, set in deferprocStack
+ // 3: openDefer
+ // 4: sp, set in deferprocStack
+ // 5: pc, set in deferprocStack
+ // 6: fn
+ s.store(closure.Type,
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
+ closure)
+ // 7: panic, set in deferprocStack
+ // 8: link, set in deferprocStack
+ // 9: framepc
+ // 10: varp
+ // 11: fd
+
+ // Then, store all the arguments of the defer call.
+ ft := fn.Type
+ off := t.FieldOff(12)
+ args := n.Rlist.Slice()
+
+ // Set receiver (for interface calls). Always a pointer.
+ if rcvr != nil {
+ p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
+ s.store(types.Types[TUINTPTR], p, rcvr)
+ }
+ // Set receiver (for method calls).
+ if n.Op == OCALLMETH {
+ f := ft.Recv()
+ s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
+ args = args[1:]
+ }
+ // Set other args.
+ for _, f := range ft.Params().Fields().Slice() {
+ s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
+ args = args[1:]
+ }
+
+ // Call runtime.deferprocStack with pointer to _defer record.
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
+ aux := ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
+ s.store(types.Types[TUINTPTR], arg0, addr)
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
+ if stksize < int64(Widthptr) {
+ // We need room for both the call to deferprocStack and the call to
+ // the deferred function.
+ // TODO Revisit this if/when we pass args in registers.
+ stksize = int64(Widthptr)
+ }
+ call.AuxInt = stksize
+ } else {
+ // Store arguments to stack, including defer/go arguments and receiver for method calls.
+ // These are written in SP-offset order.
+ argStart := Ctxt.FixedFrameSize()
+ // Defer/go args.
+ if k != callNormal {
+ // Write argsize and closure (args to newproc/deferproc).
+ argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
+ if testLateExpansion {
+ callArgs = append(callArgs, argsize)
+ } else {
+ addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
+ s.store(types.Types[TUINT32], addr, argsize)
+ }
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
+ if testLateExpansion {
+ callArgs = append(callArgs, closure)
+ } else {
+ addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
+ s.store(types.Types[TUINTPTR], addr, closure)
+ }
+ stksize += 2 * int64(Widthptr)
+ argStart += 2 * int64(Widthptr)
+ }
+
+ // Set receiver (for interface calls).
+ if rcvr != nil {
+ addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
+ ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
+ if testLateExpansion {
+ callArgs = append(callArgs, rcvr)
+ } else {
+ s.store(types.Types[TUINTPTR], addr, rcvr)
+ }
+ }
+
+ // Write args.
+ t := n.Left.Type
+ args := n.Rlist.Slice()
+ if n.Op == OCALLMETH {
+ f := t.Recv()
+ ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
+ ACArgs = append(ACArgs, ACArg)
+ callArgs = append(callArgs, arg)
+ args = args[1:]
+ }
+ for i, n := range args {
+ f := t.Params().Field(i)
+ ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset, testLateExpansion)
+ ACArgs = append(ACArgs, ACArg)
+ callArgs = append(callArgs, arg)
+ }
+
+ callArgs = append(callArgs, s.mem())
+
+ // call target
+ switch {
+ case k == callDefer:
+ aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
+ case k == callGo:
+ aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults)
+ if testLateExpansion {
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ }
+ case closure != nil:
+ // rawLoad because loading the code pointer from a
+ // closure is always safe, but IsSanitizerSafeAddr
+ // can't always figure that out currently, and it's
+ // critical that we not clobber any arguments already
+ // stored onto the stack.
+ codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
+ if testLateExpansion {
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
+ }
+ case codeptr != nil:
+ if testLateExpansion {
+ aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
+ }
+ case sym != nil:
+ if testLateExpansion {
+ aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
+ }
+ default:
+ s.Fatalf("bad call type %v %v", n.Op, n)
+ }
+ call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
+ }
+ if testLateExpansion {
+ s.prevCall = call
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ s.vars[&memVar] = call
+ }
+ // Insert OVARLIVE nodes
+ s.stmtList(n.Nbody)
+
+ // Finish block for defers
+ if k == callDefer || k == callDeferStack {
+ b := s.endBlock()
+ b.Kind = ssa.BlockDefer
+ b.SetControl(call)
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bNext)
+ // Add recover edge to exit code.
+ r := s.f.NewBlock(ssa.BlockPlain)
+ s.startBlock(r)
+ s.exit()
+ b.AddEdgeTo(r)
+ b.Likely = ssa.BranchLikely
+ s.startBlock(bNext)
+ }
+
+ if res.NumFields() == 0 || k != callNormal {
+ // call has no return value. Continue with the next statement.
+ return nil
+ }
+ fp := res.Field(0)
+ if returnResultAddr {
+ pt := types.NewPtr(fp.Type)
+ if testLateExpansion {
+ return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
+ }
+ return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize())
+ }
+
+ if testLateExpansion {
+ return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
+ }
+ return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
+}
+
+// maybeNilCheckClosure checks if a nil check of a closure is needed in some
+// architecture-dependent situations and, if so, emits the nil check.
+func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
+ if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
+		// On AIX, the closure needs to be verified as fn can be nil, except for go calls:
+		// those must be handled by the runtime so that the "go of nil func value" error is reported.
+		// TODO(neelance): On other architectures this should be eliminated by the optimization steps.
+ s.nilCheck(closure)
+ }
+}
+
+// getMethodClosure returns a value representing the closure for a method call
+func (s *state) getMethodClosure(fn *Node) *ssa.Value {
+ // Make a name n2 for the function.
+ // fn.Sym might be sync.(*Mutex).Unlock.
+ // Make a PFUNC node out of that, then evaluate it.
+ // We get back an SSA value representing &sync.(*Mutex).Unlock·f.
+ // We can then pass that to defer or go.
+ n2 := newnamel(fn.Pos, fn.Sym)
+ n2.Name.Curfn = s.curfn
+ n2.SetClass(PFUNC)
+ // n2.Sym already existed, so it's already marked as a function.
+ n2.Pos = fn.Pos
+ n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
+ return s.expr(n2)
+}
+
+// getClosureAndRcvr returns values for the appropriate closure and receiver of an
+// interface call
+func (s *state) getClosureAndRcvr(fn *Node) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.Left)
+ itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
+ s.nilCheck(itab)
+ itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
+ rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
+ return closure, rcvr
+}
+
+// etypesign returns the signed-ness of e, for integer/pointer etypes.
+// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
+func etypesign(e types.EType) int8 {
+ switch e {
+ case TINT8, TINT16, TINT32, TINT64, TINT:
+ return -1
+ case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
+ return +1
+ }
+ return 0
+}
+
+// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
+// The value that the returned Value represents is guaranteed to be non-nil.
+func (s *state) addr(n *Node) *ssa.Value {
+ if n.Op != ONAME {
+ s.pushLine(n.Pos)
+ defer s.popLine()
+ }
+
+ t := types.NewPtr(n.Type)
+ switch n.Op {
+ case ONAME:
+ switch n.Class() {
+ case PEXTERN:
+ // global variable
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
+ // TODO: Make OpAddr use AuxInt as well as Aux.
+ if n.Xoffset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ }
+ return v
+ case PPARAM:
+ // parameter slot
+ v := s.decladdrs[n]
+ if v != nil {
+ return v
+ }
+ if n == nodfp {
+ // Special arg that points to the frame pointer (Used by ORECOVER).
+ return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem)
+ }
+ s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+ return nil
+ case PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
+
+ case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ // ensure that we reuse symbols for out parameters so
+ // that cse works on their addresses
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
+ default:
+ s.Fatalf("variable address class %v not implemented", n.Class())
+ return nil
+ }
+ case ORESULT:
+ // load return from callee
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+ return s.constOffPtrSP(t, n.Xoffset)
+ }
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ if which == -1 {
+ // Do the old thing // TODO: Panic instead.
+ return s.constOffPtrSP(t, n.Xoffset)
+ }
+ x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
+ return x
+
+ case OINDEX:
+ if n.Left.Type.IsSlice() {
+ a := s.expr(n.Left)
+ i := s.expr(n.Right)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ p := s.newValue1(ssa.OpSlicePtr, t, a)
+ return s.newValue2(ssa.OpPtrIndex, t, p, i)
+ } else { // array
+ a := s.addr(n.Left)
+ i := s.expr(n.Right)
+ len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
+ }
+ case ODEREF:
+ return s.exprPtr(n.Left, n.Bounded(), n.Pos)
+ case ODOT:
+ p := s.addr(n.Left)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
+ case ODOTPTR:
+ p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
+ case OCLOSUREVAR:
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
+ case OCONVNOP:
+ addr := s.addr(n.Left)
+ return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
+ case OCALLFUNC, OCALLINTER, OCALLMETH:
+ return s.callAddr(n, callNormal)
+ case ODOTTYPE:
+ v, _ := s.dottype(n, false)
+ if v.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ if v.Args[1] != s.mem() {
+ s.Fatalf("memory no longer live from dottype load")
+ }
+ return v.Args[0]
+ default:
+ s.Fatalf("unhandled addr %v", n.Op)
+ return nil
+ }
+}
+
+// canSSA reports whether n is SSA-able.
+// n must be an ONAME (or an ODOT sequence with an ONAME base).
+func (s *state) canSSA(n *Node) bool {
+ if Debug.N != 0 {
+ return false
+ }
+ for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
+ n = n.Left
+ }
+ if n.Op != ONAME {
+ return false
+ }
+ if n.Name.Addrtaken() {
+ return false
+ }
+ if n.isParamHeapCopy() {
+ return false
+ }
+ if n.Class() == PAUTOHEAP {
+ s.Fatalf("canSSA of PAUTOHEAP %v", n)
+ }
+ switch n.Class() {
+ case PEXTERN:
+ return false
+ case PPARAMOUT:
+ if s.hasdefer {
+ // TODO: handle this case? Named return values must be
+ // in memory so that the deferred function can see them.
+ // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
+ // Or maybe not, see issue 18860. Even unnamed return values
+ // must be written back so if a defer recovers, the caller can see them.
+ return false
+ }
+ if s.cgoUnsafeArgs {
+ // Cgo effectively takes the address of all result args,
+ // but the compiler can't see that.
+ return false
+ }
+ }
+ if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
+ // wrappers generated by genwrapper need to update
+ // the .this pointer in place.
+ // TODO: treat as a PPARAMOUT?
+ return false
+ }
+ return canSSAType(n.Type)
+ // TODO: try to make more variables SSAable?
+}
+
+// canSSAType reports whether variables of type t are SSA-able.
+func canSSAType(t *types.Type) bool {
+ dowidth(t)
+ if t.Width > int64(4*Widthptr) {
+ // 4*Widthptr is an arbitrary constant. We want it
+ // to be at least 3*Widthptr so slices can be registerized.
+ // Too big and we'll introduce too much register pressure.
+ return false
+ }
+ switch t.Etype {
+ case TARRAY:
+ // We can't do larger arrays because dynamic indexing is
+ // not supported on SSA variables.
+ // TODO: allow if all indexes are constant.
+ if t.NumElem() <= 1 {
+ return canSSAType(t.Elem())
+ }
+ return false
+ case TSTRUCT:
+ if t.NumFields() > ssa.MaxStruct {
+ return false
+ }
+ for _, t1 := range t.Fields().Slice() {
+ if !canSSAType(t1.Type) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
+
+// exprPtr evaluates n to a pointer and nil-checks it.
+func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
+ p := s.expr(n)
+ if bounded || n.NonNil() {
+ if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
+ s.f.Warnl(lineno, "removed nil check")
+ }
+ return p
+ }
+ s.nilCheck(p)
+ return p
+}
+
+// nilCheck generates nil pointer checking code.
+// Used only for automatically inserted nil checks,
+// not for user code like 'x != nil'.
+func (s *state) nilCheck(ptr *ssa.Value) {
+ if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
+ return
+ }
+ s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
+}
+
+// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
+// Starts a new block on return.
+// On input, len must be converted to full int width and be nonnegative.
+// Returns idx converted to full int width.
+// If bounded is true then caller guarantees the index is not out of bounds
+// (but boundsCheck will still extend the index to full int width).
+func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ idx = s.extendIndex(idx, len, kind, bounded)
+
+ if bounded || Debug.B != 0 {
+ // If bounded or bounds checking is flag-disabled, then no check necessary,
+ // just return the extended index.
+ //
+ // Here, bounded == true if the compiler generated the index itself,
+ // such as in the expansion of a slice initializer. These indexes are
+ // compiler-generated, not Go program variables, so they cannot be
+ // attacker-controlled, so we can omit Spectre masking as well.
+ //
+ // Note that we do not want to omit Spectre masking in code like:
+ //
+ // if 0 <= i && i < len(x) {
+ // use(x[i])
+ // }
+ //
+ // Lucky for us, bounded==false for that code.
+ // In that case (handled below), we emit a bound check (and Spectre mask)
+ // and then the prove pass will remove the bounds check.
+ // In theory the prove pass could potentially remove certain
+ // Spectre masks, but it's very delicate and probably better
+ // to be conservative and leave them all in.
+ return idx
+ }
+
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+
+ var cmp *ssa.Value
+ if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
+ } else {
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ if thearch.LinkArch.Family == sys.Wasm {
+ // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
+ // Should be similar to gcWriteBarrier, but I can't make it work.
+ s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
+ } else {
+ mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
+ s.endBlock().SetControl(mem)
+ }
+ s.startBlock(bNext)
+
+ // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
+ if spectreIndex {
+ op := ssa.OpSpectreIndex
+ if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
+ op = ssa.OpSpectreSliceIndex
+ }
+ idx = s.newValue2(op, types.Types[TINT], idx, len)
+ }
+
+ return idx
+}
+
+// If cmp (a bool) is false, panic using the given function.
+func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ line := s.peekPos()
+ pos := Ctxt.PosTable.Pos(line)
+ fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
+ bPanic := s.panics[fl]
+ if bPanic == nil {
+ bPanic = s.f.NewBlock(ssa.BlockPlain)
+ s.panics[fl] = bPanic
+ s.startBlock(bPanic)
+ // The panic call takes/returns memory to ensure that the right
+ // memory state is observed if the panic happens.
+ s.rtcall(fn, false, nil)
+ }
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+ s.startBlock(bNext)
+}
+
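+// intDivide generates SSA for the integer divide or modulo operation n with
+// operands a and b, emitting a runtime divide-by-zero check unless b is a
+// nonzero constant.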
+func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
+ needcheck := true
+ switch b.Op {
+ case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
+ if b.AuxInt != 0 {
+ needcheck = false
+ }
+ }
+ if needcheck {
+ // do a size-appropriate check for zero
+ cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
+ s.check(cmp, panicdivide)
+ }
+ return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+}
+
+// rtcall issues a call to the given runtime function fn with the listed args.
+// Returns a slice of results of the given result types.
+// The call is added to the end of the current block.
+// If returns is false, the block is marked as an exit block.
+func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
+ s.prevCall = nil
+ // Write args to the stack
+ off := Ctxt.FixedFrameSize()
+ testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
+ var ACArgs []ssa.Param
+ var ACResults []ssa.Param
+ var callArgs []*ssa.Value
+
+ for _, arg := range args {
+ t := arg.Type
+ off = Rnd(off, t.Alignment())
+ size := t.Size()
+ ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
+ if testLateExpansion {
+ callArgs = append(callArgs, arg)
+ } else {
+ ptr := s.constOffPtrSP(t.PtrTo(), off)
+ s.store(t, ptr, arg)
+ }
+ off += size
+ }
+ off = Rnd(off, int64(Widthreg))
+
+	// Accumulate result types and offsets
+ offR := off
+ for _, t := range results {
+ offR = Rnd(offR, t.Alignment())
+ ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)})
+ offR += t.Size()
+ }
+
+ // Issue call
+ var call *ssa.Value
+ aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
+ if testLateExpansion {
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ } else {
+ call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
+ s.vars[&memVar] = call
+ }
+
+ if !returns {
+ // Finish block
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(call)
+ call.AuxInt = off - Ctxt.FixedFrameSize()
+ if len(results) > 0 {
+ s.Fatalf("panic call can't have results")
+ }
+ return nil
+ }
+
+ // Load results
+ res := make([]*ssa.Value, len(results))
+ if testLateExpansion {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ if canSSAType(t) {
+ res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
+ } else {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
+ res[i] = s.rawLoad(t, addr)
+ }
+ off += t.Size()
+ }
+ } else {
+ for i, t := range results {
+ off = Rnd(off, t.Alignment())
+ ptr := s.constOffPtrSP(types.NewPtr(t), off)
+ res[i] = s.load(t, ptr)
+ off += t.Size()
+ }
+ }
+ off = Rnd(off, int64(Widthptr))
+
+ // Remember how much callee stack space we needed.
+ call.AuxInt = off
+
+ return res
+}
+
+// do *left = right for type t.
+func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
+ s.instrument(t, left, instrumentWrite)
+
+ if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
+ // Known to not have write barrier. Store the whole type.
+ s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ return
+ }
+
+ // store scalar fields first, so write barrier stores for
+ // pointer fields can be grouped together, and scalar values
+ // don't need to be live across the write barrier call.
+ // TODO: if the writebarrier pass knows how to reorder stores,
+ // we can do a single store here as long as skip==0.
+ s.storeTypeScalars(t, left, right, skip)
+ if skip&skipPtr == 0 && t.HasPointers() {
+ s.storeTypePtrs(t, left, right)
+ }
+}
+
+// do *left = right for all scalar (non-pointer) parts of t.
+func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
+ switch {
+ case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
+ s.store(t, left, right)
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ s.store(t, left, right) // see issue 42032
+ }
+ // otherwise, no scalar fields.
+ case t.IsString():
+ if skip&skipLen != 0 {
+ return
+ }
+ len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[TINT], lenAddr, len)
+ case t.IsSlice():
+ if skip&skipLen == 0 {
+ len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[TINT], lenAddr, len)
+ }
+ if skip&skipCap == 0 {
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
+ capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
+ s.store(types.Types[TINT], capAddr, cap)
+ }
+ case t.IsInterface():
+ // itab field doesn't need a write barrier (even though it is a pointer).
+ itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
+ s.store(types.Types[TUINTPTR], left, itab)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypeScalars(ft, addr, val, 0)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// do *left = right for all pointer parts of t.
+func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
+ switch {
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ break // see issue 42032
+ }
+ s.store(t, left, right)
+ case t.IsString():
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
+ s.store(s.f.Config.Types.BytePtr, left, ptr)
+ case t.IsSlice():
+ elType := types.NewPtr(t.Elem())
+ ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
+ s.store(elType, left, ptr)
+ case t.IsInterface():
+ // itab field is treated as a scalar.
+ idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
+ idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
+ s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ if !ft.HasPointers() {
+ continue
+ }
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypePtrs(ft, addr, val)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
+// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
+// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
+func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+ var a *ssa.Value
+ if forLateExpandedCall {
+ if !canSSAType(t) {
+ a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
+ } else {
+ a = s.expr(n)
+ }
+ } else {
+ s.storeArgWithBase(n, t, s.sp, off)
+ }
+ return ssa.Param{Type: t, Offset: int32(off)}, a
+}
+
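+// storeArgWithBase evaluates n and stores it, as a value of type t, at offset
+// off from the given base pointer, using a move for types that are not SSA-able.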
+func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
+ pt := types.NewPtr(t)
+ var addr *ssa.Value
+ if base == s.sp {
+ // Use special routine that avoids allocation on duplicate offsets.
+ addr = s.constOffPtrSP(pt, off)
+ } else {
+ addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
+ }
+
+ if !canSSAType(t) {
+ a := s.addr(n)
+ s.move(t, addr, a)
+ return
+ }
+
+ a := s.expr(n)
+ s.storeType(t, addr, a, 0, false)
+}
+
+// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
+// i,j,k may be nil, in which case they are set to their default value.
+// v may be a slice, string or pointer to an array.
+func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
+ t := v.Type
+ var ptr, len, cap *ssa.Value
+ switch {
+ case t.IsSlice():
+ ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
+ case t.IsString():
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
+ cap = len
+ case t.IsPtr():
+ if !t.Elem().IsArray() {
+ s.Fatalf("bad ptr to array in slice %v\n", t)
+ }
+ s.nilCheck(v)
+ ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
+ len = s.constInt(types.Types[TINT], t.Elem().NumElem())
+ cap = len
+ default:
+ s.Fatalf("bad type in slice %v\n", t)
+ }
+
+ // Set default values
+ if i == nil {
+ i = s.constInt(types.Types[TINT], 0)
+ }
+ if j == nil {
+ j = len
+ }
+ three := true
+ if k == nil {
+ three = false
+ k = cap
+ }
+
+ // Panic if slice indices are not in bounds.
+ // Make sure we check these in reverse order so that we're always
+ // comparing against a value known to be nonnegative. See issue 28797.
+ if three {
+ if k != cap {
+ kind := ssa.BoundsSlice3Alen
+ if t.IsSlice() {
+ kind = ssa.BoundsSlice3Acap
+ }
+ k = s.boundsCheck(k, cap, kind, bounded)
+ }
+ if j != k {
+ j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
+ } else {
+ if j != k {
+ kind := ssa.BoundsSliceAlen
+ if t.IsSlice() {
+ kind = ssa.BoundsSliceAcap
+ }
+ j = s.boundsCheck(j, k, kind, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
+ }
+
+ // Word-sized integer operations.
+ subOp := s.ssaOp(OSUB, types.Types[TINT])
+ mulOp := s.ssaOp(OMUL, types.Types[TINT])
+ andOp := s.ssaOp(OAND, types.Types[TINT])
+
+ // Calculate the length (rlen) and capacity (rcap) of the new slice.
+ // For strings the capacity of the result is unimportant. However,
+ // we use rcap to test if we've generated a zero-length slice.
+	// For strings, the length is used for that test instead.
+ rlen := s.newValue2(subOp, types.Types[TINT], j, i)
+ rcap := rlen
+ if j != k && !t.IsString() {
+ rcap = s.newValue2(subOp, types.Types[TINT], k, i)
+ }
+
+ if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
+ // No pointer arithmetic necessary.
+ return ptr, rlen, rcap
+ }
+
+ // Calculate the base pointer (rptr) for the new slice.
+ //
+ // Generate the following code assuming that indexes are in bounds.
+ // The masking is to make sure that we don't generate a slice
+ // that points to the next object in memory. We cannot just set
+ // the pointer to nil because then we would create a nil slice or
+ // string.
+ //
+ // rcap = k - i
+ // rlen = j - i
+ // rptr = ptr + (mask(rcap) & (i * stride))
+ //
+ // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
+ // of the element type.
+ stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
+
+ // The delta is the number of bytes to offset ptr by.
+ delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
+
+ // If we're slicing to the point where the capacity is zero,
+ // zero out the delta.
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
+
+ // Compute rptr = ptr + delta.
+ rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
+
+ return rptr, rlen, rcap
+}
+
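+// u642fcvtTab holds the opcodes needed to convert a uint64 to a float of a
+// given width; see uint64Tofloat for how its entries are used.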
+type u642fcvtTab struct {
+ leq, cvt2F, and, rsh, or, add ssa.Op
+ one func(*state, *types.Type, int64) *ssa.Value
+}
+
+var u64_f64 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to64F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd64F,
+ one: (*state).constInt64,
+}
+
+var u64_f32 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to32F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd32F,
+ one: (*state).constInt64,
+}
+
+func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = (floatY) x
+ // } else {
+ // y = uintX(x) ; y = x & 1
+	//    z = uintX(x) ; z = z >> 1
+ // z = z | y
+ // result = floatY(z)
+ // result = result + result
+ // }
+ //
+ // Code borrowed from old code generator.
+ // What's going on: large 64-bit "unsigned" looks like
+ // negative number to hardware's integer-to-float
+ // conversion. However, because the mantissa is only
+ // 63 bits, we don't need the LSB, so instead we do an
+ // unsigned right shift (divide by two), convert, and
+ // double. However, before we do that, we need to be
+ // sure that we do not lose a "1" if that made the
+ // difference in the resulting rounding. Therefore, we
+ // preserve it, and OR (not ADD) it back in. The case
+ // that matters is when the eleven discarded bits are
+ // equal to 10000000001; that rounds up, and the 1 cannot
+ // be lost else it would round down if the LSB of the
+ // candidate mantissa is 0.
+ cmp := s.newValue2(cvttab.leq, types.Types[TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ one := cvttab.one(s, ft, 1)
+ y := s.newValue2(cvttab.and, ft, x, one)
+ z := s.newValue2(cvttab.rsh, ft, x, one)
+ z = s.newValue2(cvttab.or, ft, z, y)
+ a := s.newValue1(cvttab.cvt2F, tt, z)
+ a1 := s.newValue2(cvttab.add, tt, a, a)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type)
+}
+
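+// u322fcvtTab holds the opcodes needed to convert a uint32 to a float of a
+// given width; see uint32Tofloat for how its entries are used.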
+type u322fcvtTab struct {
+ cvtI2F, cvtF2F ssa.Op
+}
+
+var u32_f64 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to64F,
+ cvtF2F: ssa.OpCopy,
+}
+
+var u32_f32 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to32F,
+ cvtF2F: ssa.OpCvt64Fto32F,
+}
+
+func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = floatY(x)
+ // } else {
+ // result = floatY(float64(x) + (1<<32))
+ // }
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvtI2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
+ a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
+
+ s.vars[n] = a3
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type)
+}
+
+// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
+func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
+ if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
+ s.Fatalf("node must be a map or a channel")
+ }
+ // if n == nil {
+ // return 0
+ // } else {
+ // // len
+ // return *((*int)n)
+ // // cap
+ // return *(((*int)n)+1)
+ // }
+ lenType := n.Type
+ nilValue := s.constNil(types.Types[TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchUnlikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ // length/capacity of a nil map/chan is zero
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ s.vars[n] = s.zeroVal(lenType)
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ switch n.Op {
+ case OLEN:
+ // length is stored in the first word for map/chan
+ s.vars[n] = s.load(lenType, x)
+ case OCAP:
+ // capacity is stored in the second word for chan
+ sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
+ s.vars[n] = s.load(lenType, sw)
+ default:
+ s.Fatalf("op must be OLEN or OCAP")
+ }
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, lenType)
+}
+
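+// f2uCvtTab holds the opcodes and cutoff constant needed to convert a float to
+// an unsigned integer of a given width; see floatToUint for how its entries are used.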
+type f2uCvtTab struct {
+ ltf, cvt2U, subf, or ssa.Op
+ floatValue func(*state, *types.Type, float64) *ssa.Value
+ intValue func(*state, *types.Type, int64) *ssa.Value
+ cutoff uint64
+}
+
+var f32_u64 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto64,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat32,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f64_u64 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto64,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat64,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f32_u32 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto32,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat32,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+var f64_u32 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto32,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat64,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u64, n, x, ft, tt)
+}
+func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u64, n, x, ft, tt)
+}
+
+func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u32, n, x, ft, tt)
+}
+
+func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u32, n, x, ft, tt)
+}
+
+func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // cutoff:=1<<(intY_Size-1)
+ // if x < floatX(cutoff) {
+ // result = uintY(x)
+ // } else {
+ // y = x - floatX(cutoff)
+ // z = uintY(y)
+ // result = z | -(cutoff)
+ // }
+ cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
+ cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2U, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ y := s.newValue2(cvttab.subf, ft, x, cutoff)
+ y = s.newValue1(cvttab.cvt2U, tt, y)
+ z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
+ a1 := s.newValue2(cvttab.or, tt, y, z)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type)
+}
+
+// dottype generates SSA for a type assertion node.
+// commaok indicates whether to panic or return a bool.
+// If commaok is false, resok will be nil.
+func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.Left) // input interface
+ target := s.expr(n.Right) // target type
+ byteptr := s.f.Config.Types.BytePtr
+
+ if n.Type.IsInterface() {
+ if n.Type.IsEmptyInterface() {
+ // Converting to an empty interface.
+ // Input could be an empty or nonempty interface.
+ if Debug_typeassert > 0 {
+ Warnl(n.Pos, "type assertion inlined")
+ }
+
+ // Get itab/type field from input.
+ itab := s.newValue1(ssa.OpITab, byteptr, iface)
+ // Conversion succeeds iff that field is not nil.
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
+
+ if n.Left.Type.IsEmptyInterface() && commaok {
+ // Converting empty interface to empty interface with ,ok is just a nil check.
+ return iface, cond
+ }
+
+ // Branch on nilness.
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // On failure, panic by calling panicnildottype.
+ s.startBlock(bFail)
+ s.rtcall(panicnildottype, false, nil, target)
+
+ // On success, return (perhaps modified) input interface.
+ s.startBlock(bOk)
+ if n.Left.Type.IsEmptyInterface() {
+ res = iface // Use input interface unchanged.
+ return
+ }
+ // Load type out of itab, build interface with existing idata.
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
+ typ := s.load(byteptr, off)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
+ return
+ }
+
+ s.startBlock(bOk)
+ // nonempty -> empty
+ // Need to load type from itab
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
+ s.vars[&typVar] = s.load(byteptr, off)
+ s.endBlock()
+
+ // itab is nil, might as well use that as the nil result.
+ s.startBlock(bFail)
+ s.vars[&typVar] = itab
+ s.endBlock()
+
+ // Merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ bOk.AddEdgeTo(bEnd)
+ bFail.AddEdgeTo(bEnd)
+ s.startBlock(bEnd)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
+ resok = cond
+ delete(s.vars, &typVar)
+ return
+ }
+ // converting to a nonempty interface needs a runtime call.
+ if Debug_typeassert > 0 {
+ Warnl(n.Pos, "type assertion not inlined")
+ }
+ if n.Left.Type.IsEmptyInterface() {
+ if commaok {
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ return call[0], call[1]
+ }
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ }
+ if commaok {
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
+ return call[0], call[1]
+ }
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ }
+
+ if Debug_typeassert > 0 {
+ Warnl(n.Pos, "type assertion inlined")
+ }
+
+ // Converting to a concrete type.
+ direct := isdirectiface(n.Type)
+ itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
+ if Debug_typeassert > 0 {
+ Warnl(n.Pos, "type assertion inlined")
+ }
+ var targetITab *ssa.Value
+ if n.Left.Type.IsEmptyInterface() {
+ // Looking for pointer to target type.
+ targetITab = target
+ } else {
+ // Looking for pointer to itab for target type and source interface.
+ targetITab = s.expr(n.List.First())
+ }
+
+ var tmp *Node // temporary for use with large types
+ var addr *ssa.Value // address of tmp
+ if commaok && !canSSAType(n.Type) {
+ // unSSAable type, use temporary.
+ // TODO: get rid of some of these temporaries.
+ tmp = tempAt(n.Pos, s.curfn, n.Type)
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ addr = s.addr(tmp)
+ }
+
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // on failure, panic by calling panicdottype
+ s.startBlock(bFail)
+ taddr := s.expr(n.Right.Right)
+ if n.Left.Type.IsEmptyInterface() {
+ s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
+ } else {
+ s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
+ }
+
+ // on success, return data from interface
+ s.startBlock(bOk)
+ if direct {
+ return s.newValue1(ssa.OpIData, n.Type, iface), nil
+ }
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
+ return s.load(n.Type, p), nil
+ }
+
+ // commaok is the more complicated case because we have
+ // a control flow merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ // Note that we need a new valVar each time (unlike okVar where we can
+ // reuse the variable) because it might have a different type every time.
+ valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
+
+ // type assertion succeeded
+ s.startBlock(bOk)
+ if tmp == nil {
+ if direct {
+ s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
+ s.vars[valVar] = s.load(n.Type, p)
+ }
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
+ s.move(n.Type, addr, p)
+ }
+ s.vars[&okVar] = s.constBool(true)
+ s.endBlock()
+ bOk.AddEdgeTo(bEnd)
+
+ // type assertion failed
+ s.startBlock(bFail)
+ if tmp == nil {
+ s.vars[valVar] = s.zeroVal(n.Type)
+ } else {
+ s.zero(n.Type, addr)
+ }
+ s.vars[&okVar] = s.constBool(false)
+ s.endBlock()
+ bFail.AddEdgeTo(bEnd)
+
+ // merge point
+ s.startBlock(bEnd)
+ if tmp == nil {
+ res = s.variable(valVar, n.Type)
+ delete(s.vars, valVar)
+ } else {
+ res = s.load(n.Type, addr)
+ s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
+ }
+ resok = s.variable(&okVar, types.Types[TBOOL])
+ delete(s.vars, &okVar)
+ return res, resok
+}
+
+// variable returns the value of a variable at the current location.
+func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
+ v := s.vars[name]
+ if v != nil {
+ return v
+ }
+ v = s.fwdVars[name]
+ if v != nil {
+ return v
+ }
+
+ if s.curBlock == s.f.Entry {
+ // No variable should be live at entry.
+ s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
+ }
+ // Make a FwdRef, which records a value that's live on block input.
+ // We'll find the matching definition as part of insertPhis.
+ v = s.newValue0A(ssa.OpFwdRef, t, name)
+ s.fwdVars[name] = v
+ s.addNamedValue(name, v)
+ return v
+}
+
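+// mem returns the current memory state as an SSA value.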
+func (s *state) mem() *ssa.Value {
+ return s.variable(&memVar, types.TypeMem)
+}
+
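+// addNamedValue records v as a value of the named variable n in f.NamedValues.
+// Dummy nodes, temporaries, and named output parameters are not tracked.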
+func (s *state) addNamedValue(n *Node, v *ssa.Value) {
+ if n.Class() == Pxxx {
+ // Don't track our dummy nodes (&memVar etc.).
+ return
+ }
+ if n.IsAutoTmp() {
+ // Don't track temporary variables.
+ return
+ }
+ if n.Class() == PPARAMOUT {
+ // Don't track named output values. This prevents return values
+ // from being assigned too early. See #14591 and #14762. TODO: allow this.
+ return
+ }
+ if n.Class() == PAUTO && n.Xoffset != 0 {
+ s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
+ }
+ loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
+ values, ok := s.f.NamedValues[loc]
+ if !ok {
+ s.f.Names = append(s.f.Names, loc)
+ }
+ s.f.NamedValues[loc] = append(values, v)
+}
+
+// Generate a disconnected call to a runtime routine and a return.
+func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog {
+ p := pp.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = sym
+ p = pp.Prog(obj.ARET)
+ return p
+}
+
+// Branch is an unresolved branch.
+type Branch struct {
+ P *obj.Prog // branch instruction
+ B *ssa.Block // target
+}
+
+// SSAGenState contains state needed during Prog generation.
+type SSAGenState struct {
+ pp *Progs
+
+ // Branches remembers all the branch instructions we've seen
+ // and where they would like to go.
+ Branches []Branch
+
+ // bstart remembers where each block starts (indexed by block ID)
+ bstart []*obj.Prog
+
+ // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
+ ScratchFpMem *Node
+
+ maxarg int64 // largest frame size for arguments to calls made by the function
+
+ // Map from GC safe points to liveness index, generated by
+ // liveness analysis.
+ livenessMap LivenessMap
+
+ // lineRunStart records the beginning of the current run of instructions
+ // within a single block sharing the same line number
+ // Used to move statement marks to the beginning of such runs.
+ lineRunStart *obj.Prog
+
+ // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
+ OnWasmStackSkipped int
+}
+
+// Prog appends a new Prog.
+func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
+ p := s.pp.Prog(as)
+ if ssa.LosesStmtMark(as) {
+ return p
+ }
+ // Float a statement start to the beginning of any same-line run.
+ // lineRunStart is reset at block boundaries, which appears to work well.
+ if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
+ s.lineRunStart = p
+ } else if p.Pos.IsStmt() == src.PosIsStmt {
+ s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
+ p.Pos = p.Pos.WithNotStmt()
+ }
+ return p
+}
+
+// Pc returns the current Prog.
+func (s *SSAGenState) Pc() *obj.Prog {
+ return s.pp.next
+}
+
+// SetPos sets the current source position.
+func (s *SSAGenState) SetPos(pos src.XPos) {
+ s.pp.pos = pos
+}
+
+// Br emits a single branch instruction and returns the instruction.
+// Not all architectures need the returned instruction, but otherwise
+// the boilerplate is common to all.
+func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, Branch{P: p, B: target})
+ return p
+}
+
+// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
+// that reduce "jumpy" line number churn when debugging.
+// Spill/fill/copy instructions from the register allocator,
+// phi functions, and instructions with a no-pos position
+// are examples of instructions that can cause churn.
+func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
+ // These are not statements
+ s.SetPos(v.Pos.WithNotStmt())
+ default:
+ p := v.Pos
+ if p != src.NoXPos {
+ // If the position is defined, update the position.
+ // Also convert default IsStmt to NotStmt; only
+ // explicit statement boundaries should appear
+ // in the generated code.
+ if p.IsStmt() != src.PosIsStmt {
+ p = p.WithNotStmt()
+ // Calls use the pos attached to v, but copy the statement mark from SSAGenState
+ }
+ s.SetPos(p)
+ } else {
+ s.SetPos(s.pp.pos.WithNotStmt())
+ }
+ }
+}
+
+// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
+type byXoffset []*Node
+
+func (s byXoffset) Len() int { return len(s) }
+func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
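+// emitStackObjects writes the stack object records for the current function's
+// address-taken, liveness-tracked variables and emits a FUNCDATA_StackObjects
+// entry pointing at them, so the runtime can find pointers in those objects
+// when scanning the stack.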
+func emitStackObjects(e *ssafn, pp *Progs) {
+ var vars []*Node
+ for _, n := range e.curfn.Func.Dcl {
+ if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ vars = append(vars, n)
+ }
+ }
+ if len(vars) == 0 {
+ return
+ }
+
+ // Sort variables from lowest to highest address.
+ sort.Sort(byXoffset(vars))
+
+ // Populate the stack object data.
+ // Format must match runtime/stack.go:stackObjectRecord.
+ x := e.curfn.Func.lsym.Func().StackObjects
+ off := 0
+ off = duintptr(x, off, uint64(len(vars)))
+ for _, v := range vars {
+ // Note: arguments and return values have non-negative Xoffset,
+ // in which case the offset is relative to argp.
+ // Locals have a negative Xoffset, in which case the offset is relative to varp.
+ off = duintptr(x, off, uint64(v.Xoffset))
+ if !typesym(v.Type).Siggen() {
+ e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
+ }
+ off = dsymptr(x, off, dtypesym(v.Type), 0)
+ }
+
+ // Emit a funcdata pointing at the stack object data.
+ p := pp.Prog(obj.AFUNCDATA)
+ Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+
+ if debuglive != 0 {
+ for _, v := range vars {
+ Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
+ }
+ }
+}
+
+// genssa appends entries to pp for each instruction in f.
+func genssa(f *ssa.Func, pp *Progs) {
+ var s SSAGenState
+
+ e := f.Frontend().(*ssafn)
+
+ s.livenessMap = liveness(e, f, pp)
+ emitStackObjects(e, pp)
+
+ openDeferInfo := e.curfn.Func.lsym.Func().OpenCodedDeferInfo
+ if openDeferInfo != nil {
+ // This function uses open-coded defers -- write out the funcdata
+ // info that we computed at the end of genssa.
+ p := pp.Prog(obj.AFUNCDATA)
+ Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = openDeferInfo
+ }
+
+ // Remember where each block starts.
+ s.bstart = make([]*obj.Prog, f.NumBlocks())
+ s.pp = pp
+ var progToValue map[*obj.Prog]*ssa.Value
+ var progToBlock map[*obj.Prog]*ssa.Block
+ var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
+ if f.PrintOrHtmlSSA {
+ progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
+ progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
+ f.Logf("genssa %s\n", f.Name)
+ progToBlock[s.pp.next] = f.Blocks[0]
+ }
+
+ s.ScratchFpMem = e.scratchFpMem
+
+ if Ctxt.Flag_locationlists {
+ if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
+ f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
+ }
+ valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
+ for i := range valueToProgAfter {
+ valueToProgAfter[i] = nil
+ }
+ }
+
+ // If the very first instruction is not tagged as a statement,
+	// debuggers may attribute it to the previous function in the program.
+ firstPos := src.NoXPos
+ for _, v := range f.Entry.Values {
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt()
+ break
+ }
+ }
+
+ // inlMarks has an entry for each Prog that implements an inline mark.
+ // It maps from that Prog to the global inlining id of the inlined body
+ // which should unwind to this Prog's location.
+ var inlMarks map[*obj.Prog]int32
+ var inlMarkList []*obj.Prog
+
+ // inlMarksByPos maps from a (column 1) source position to the set of
+ // Progs that are in the set above and have that source position.
+ var inlMarksByPos map[src.XPos][]*obj.Prog
+
+ // Emit basic blocks
+ for i, b := range f.Blocks {
+ s.bstart[b.ID] = s.pp.next
+ s.lineRunStart = nil
+
+ // Attach a "default" liveness info. Normally this will be
+ // overwritten in the Values loop below for each Value. But
+ // for an empty block this will be used for its control
+ // instruction. We won't use the actual liveness map on a
+ // control instruction. Just mark it something that is
+ // preemptible, unless this function is "all unsafe".
+ s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)}
+
+ // Emit values in block
+ thearch.SSAMarkMoves(&s, b)
+ for _, v := range b.Values {
+ x := s.pp.next
+ s.DebugFriendlySetPosFrom(v)
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB:
+ // nothing to do
+ case ssa.OpSelect0, ssa.OpSelect1:
+ // nothing to do
+ case ssa.OpGetG:
+ // nothing to do when there's a g register,
+ // and checkLower complains if there's not
+ case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
+ // nothing to do; already used by liveness
+ case ssa.OpPhi:
+ CheckLoweredPhi(v)
+ case ssa.OpConvert:
+ // nothing to do; no-op conversion for liveness
+ if v.Args[0].Reg() != v.Reg() {
+ v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
+ }
+ case ssa.OpInlMark:
+ p := thearch.Ginsnop(s.pp)
+ if inlMarks == nil {
+ inlMarks = map[*obj.Prog]int32{}
+ inlMarksByPos = map[src.XPos][]*obj.Prog{}
+ }
+ inlMarks[p] = v.AuxInt32()
+ inlMarkList = append(inlMarkList, p)
+ pos := v.Pos.AtColumn1()
+ inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
+
+ default:
+ // Attach this safe point to the next
+ // instruction.
+ s.pp.nextLive = s.livenessMap.Get(v)
+
+ // Special case for first line in function; move it to the start.
+ if firstPos != src.NoXPos {
+ s.SetPos(firstPos)
+ firstPos = src.NoXPos
+ }
+ // let the backend handle it
+ thearch.SSAGenValue(&s, v)
+ }
+
+ if Ctxt.Flag_locationlists {
+ valueToProgAfter[v.ID] = s.pp.next
+ }
+
+ if f.PrintOrHtmlSSA {
+ for ; x != s.pp.next; x = x.Link {
+ progToValue[x] = v
+ }
+ }
+ }
+ // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
+ if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+ p := thearch.Ginsnop(s.pp)
+ p.Pos = p.Pos.WithIsStmt()
+ if b.Pos == src.NoXPos {
+ b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
+ if b.Pos == src.NoXPos {
+ b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
+ }
+ }
+ b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
+ }
+ // Emit control flow instructions for block
+ var next *ssa.Block
+ if i < len(f.Blocks)-1 && Debug.N == 0 {
+ // If -N, leave next==nil so every block with successors
+ // ends in a JMP (except call blocks - plive doesn't like
+ // select{send,recv} followed by a JMP call). Helps keep
+ // line numbers for otherwise empty blocks.
+ next = f.Blocks[i+1]
+ }
+ x := s.pp.next
+ s.SetPos(b.Pos)
+ thearch.SSAGenBlock(&s, b, next)
+ if f.PrintOrHtmlSSA {
+ for ; x != s.pp.next; x = x.Link {
+ progToBlock[x] = b
+ }
+ }
+ }
+ if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
+ // We need the return address of a panic call to
+ // still be inside the function in question. So if
+ // it ends in a call which doesn't return, add a
+ // nop (which will never execute) after the call.
+ thearch.Ginsnop(pp)
+ }
+ if openDeferInfo != nil {
+ // When doing open-coded defers, generate a disconnected call to
+		// deferreturn and a return. This will be used during panic
+		// recovery to unwind the stack and return to the runtime.
+ s.pp.nextLive = s.livenessMap.deferreturn
+ gencallret(pp, Deferreturn)
+ }
+
+ if inlMarks != nil {
+ // We have some inline marks. Try to find other instructions we're
+ // going to emit anyway, and use those instructions instead of the
+ // inline marks.
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm {
+ // Don't use 0-sized instructions as inline marks, because we need
+ // to identify inline mark instructions by pc offset.
+ // (Some of these instructions are sometimes zero-sized, sometimes not.
+ // We must not use anything that even might be zero-sized.)
+ // TODO: are there others?
+ continue
+ }
+ if _, ok := inlMarks[p]; ok {
+ // Don't use inline marks themselves. We don't know
+ // whether they will be zero-sized or not yet.
+ continue
+ }
+ pos := p.Pos.AtColumn1()
+ s := inlMarksByPos[pos]
+ if len(s) == 0 {
+ continue
+ }
+ for _, m := range s {
+ // We found an instruction with the same source position as
+ // some of the inline marks.
+ // Use this instruction instead.
+ p.Pos = p.Pos.WithIsStmt() // promote position to a statement
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[m])
+ // Make the inline mark a real nop, so it doesn't generate any code.
+ m.As = obj.ANOP
+ m.Pos = src.NoXPos
+ m.From = obj.Addr{}
+ m.To = obj.Addr{}
+ }
+ delete(inlMarksByPos, pos)
+ }
+ // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
+ for _, p := range inlMarkList {
+ if p.As != obj.ANOP {
+ pp.curfn.Func.lsym.Func().AddInlMark(p, inlMarks[p])
+ }
+ }
+ }
+
+ if Ctxt.Flag_locationlists {
+ e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
+ bstart := s.bstart
+ // Note that at this moment, Prog.Pc is a sequence number; it's
+ // not a real PC until after assembly, so this mapping has to
+ // be done later.
+ e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
+ switch v {
+ case ssa.BlockStart.ID:
+ if b == f.Entry.ID {
+ return 0 // Start at the very beginning, at the assembler-generated prologue.
+ // this should only happen for function args (ssa.OpArg)
+ }
+ return bstart[b].Pc
+ case ssa.BlockEnd.ID:
+ return e.curfn.Func.lsym.Size
+ default:
+ return valueToProgAfter[v].Pc
+ }
+ }
+ }
+
+ // Resolve branches, and relax DefaultStmt into NotStmt
+ for _, br := range s.Branches {
+ br.P.To.SetTarget(s.bstart[br.B.ID])
+ if br.P.Pos.IsStmt() != src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ }
+
+ }
+
+ if e.log { // spew to stdout
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ f.Logf("# %s\n", filename)
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
+ }
+ }
+ if f.HTMLWriter != nil { // spew to ssa.html
+ var buf bytes.Buffer
+ buf.WriteString("<code>")
+ buf.WriteString("<dl class=\"ssa-gen\">")
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ // Don't spam every line with the file name, which is often huge.
+ // Only print changes, and "unknown" is not a change.
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
+ buf.WriteString(html.EscapeString("# " + filename))
+ buf.WriteString("</dd>")
+ }
+
+ buf.WriteString("<dt class=\"ssa-prog-src\">")
+ if v, ok := progToValue[p]; ok {
+ buf.WriteString(v.HTML())
+ } else if b, ok := progToBlock[p]; ok {
+ buf.WriteString("<b>" + b.HTML() + "</b>")
+ }
+ buf.WriteString("</dt>")
+ buf.WriteString("<dd class=\"ssa-prog\">")
+ buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
+ buf.WriteString("</dd>")
+ }
+ buf.WriteString("</dl>")
+ buf.WriteString("</code>")
+ f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
+ }
+
+ defframe(&s, e)
+
+ f.HTMLWriter.Close()
+ f.HTMLWriter = nil
+}
+
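+// defframe fills in the final argument and frame sizes of the function's TEXT
+// Prog and emits code that zeroes ambiguously live (needzero) stack variables
+// at function entry.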
+func defframe(s *SSAGenState, e *ssafn) {
+ pp := s.pp
+
+ frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
+ if thearch.PadFrame != nil {
+ frame = thearch.PadFrame(frame)
+ }
+
+ // Fill in argument and frame size.
+ pp.Text.To.Type = obj.TYPE_TEXTSIZE
+ pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Offset = frame
+
+ // Insert code to zero ambiguously live variables so that the
+ // garbage collector only sees initialized values when it
+ // looks for pointers.
+ p := pp.Text
+ var lo, hi int64
+
+ // Opaque state for backend to use. Current backends use it to
+ // keep track of which helper registers have been zeroed.
+ var state uint32
+
+ // Iterate through declarations. They are sorted in decreasing Xoffset order.
+ for _, n := range e.curfn.Func.Dcl {
+ if !n.Name.Needzero() {
+ continue
+ }
+ if n.Class() != PAUTO {
+ e.Fatalf(n.Pos, "needzero class %d", n.Class())
+ }
+ if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
+ e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ }
+
+ if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ // Merge with range we already have.
+ lo = n.Xoffset
+ continue
+ }
+
+ // Zero old range
+ p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+
+ // Set new range.
+ lo = n.Xoffset
+ hi = lo + n.Type.Size()
+ }
+
+ // Zero final range.
+ thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+}
+
+// IndexJump pairs a jump instruction with the index of the successor block it
+// targets. It is used for generating consecutive jump instructions to model a
+// specific branching.
+type IndexJump struct {
+ Jump obj.As
+ Index int
+}
+
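+// oneJump emits the jump instruction described by jump from block b to the
+// successor selected by jump.Index, using b's source position.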
+func (s *SSAGenState) oneJump(b *ssa.Block, jump *IndexJump) {
+ p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
+ p.Pos = b.Pos
+}
+
+// CombJump generates combinational instructions (2 at present) for a block jump,
+// so that the behavior of non-standard condition codes can be simulated.
+func (s *SSAGenState) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
+ switch next {
+ case b.Succs[0].Block():
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ case b.Succs[1].Block():
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ default:
+ var q *obj.Prog
+ if b.Likely != ssa.BranchUnlikely {
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ q = s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ q = s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ q.Pos = b.Pos
+ }
+}
+
+// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+func AddAux(a *obj.Addr, v *ssa.Value) {
+ AddAux2(a, v, v.AuxInt)
+}
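+// AddAux2 is like AddAux, but adds the given offset (rather than v.AuxInt),
+// plus any symbol offset carried in v.Aux, to a.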
+func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
+ if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
+ v.Fatalf("bad AddAux addr %v", a)
+ }
+ // add integer offset
+ a.Offset += offset
+
+ // If no additional symbol offset, we're done.
+ if v.Aux == nil {
+ return
+ }
+ // Add symbol's offset from its base register.
+ switch n := v.Aux.(type) {
+ case *ssa.AuxCall:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n.Fn
+ case *obj.LSym:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n
+ case *Node:
+ if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ a.Name = obj.NAME_PARAM
+ a.Sym = n.Orig.Sym.Linksym()
+ a.Offset += n.Xoffset
+ break
+ }
+ a.Name = obj.NAME_AUTO
+ a.Sym = n.Sym.Linksym()
+ a.Offset += n.Xoffset
+ default:
+ v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+ }
+}
+
+// extendIndex extends idx to a full int width.
+// It panics with the given kind if idx does not fit in an int (which can only happen on 32-bit archs).
+func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ size := idx.Type.Size()
+ if size == s.config.PtrSize {
+ return idx
+ }
+ if size > s.config.PtrSize {
+ // truncate 64-bit indexes on 32-bit pointer archs. Test the
+ // high word and branch to out-of-bounds failure if it is not 0.
+ var lo *ssa.Value
+ if idx.Type.IsSigned() {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
+ } else {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
+ }
+ if bounded || Debug.B != 0 {
+ return lo
+ }
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
+ s.endBlock().SetControl(mem)
+ s.startBlock(bNext)
+
+ return lo
+ }
+
+ // Extend value to the required size
+ var op ssa.Op
+ if idx.Type.IsSigned() {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad signed index extension %s", idx.Type)
+ }
+ } else {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("bad unsigned index extension %s", idx.Type)
+ }
+ }
+ return s.newValue1(op, types.Types[TINT], idx)
+}
+
+// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
+// Called during ssaGenValue.
+func CheckLoweredPhi(v *ssa.Value) {
+ if v.Op != ssa.OpPhi {
+ v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
+ }
+ if v.Type.IsMemory() {
+ return
+ }
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
+ }
+ }
+}
+
+// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
+// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
+// That register contains the closure pointer on closure entry.
+func CheckLoweredGetClosurePtr(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block || entry.Values[0] != v {
+ Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+}
+
+// AutoVar returns a *Node and int64 representing the auto variable and offset within it
+// where v should be spilled.
+func AutoVar(v *ssa.Value) (*Node, int64) {
+ loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
+ if v.Type.Size() > loc.Type.Size() {
+ v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+ }
+ return loc.N.(*Node), loc.Off
+}
+
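+// AddrAuto sets a to refer to the stack slot (auto or parameter) where v is
+// spilled, using the node and offset reported by AutoVar.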
+func AddrAuto(a *obj.Addr, v *ssa.Value) {
+ n, off := AutoVar(v)
+ a.Type = obj.TYPE_MEM
+ a.Sym = n.Sym.Linksym()
+ a.Reg = int16(thearch.REGSP)
+ a.Offset = n.Xoffset + off
+ if n.Class() == PPARAM || n.Class() == PPARAMOUT {
+ a.Name = obj.NAME_PARAM
+ } else {
+ a.Name = obj.NAME_AUTO
+ }
+}
+
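+// AddrScratch sets a to refer to the function's scratch slot for floating
+// point / memory moves; it panics if no scratch slot was reserved.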
+func (s *SSAGenState) AddrScratch(a *obj.Addr) {
+ if s.ScratchFpMem == nil {
+ panic("no scratch memory available; forgot to declare usesScratch for Op?")
+ }
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_AUTO
+ a.Sym = s.ScratchFpMem.Sym.Linksym()
+ a.Reg = int16(thearch.REGSP)
+ a.Offset = s.ScratchFpMem.Xoffset
+}
+
+// Call returns a new CALL instruction for the SSA value v.
+// It uses PrepareCall to prepare the call.
+func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
+ pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness of the call comes from ssaGenState
+ s.PrepareCall(v)
+
+ p := s.Prog(obj.ACALL)
+ if pPosIsStmt == src.PosIsStmt {
+ p.Pos = v.Pos.WithIsStmt()
+ } else {
+ p.Pos = v.Pos.WithNotStmt()
+ }
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = sym.Fn
+ } else {
+ // TODO(mdempsky): Can these differences be eliminated?
+ switch thearch.LinkArch.Family {
+ case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
+ p.To.Type = obj.TYPE_REG
+ case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
+ p.To.Type = obj.TYPE_MEM
+ default:
+ Fatalf("unknown indirect call family")
+ }
+ p.To.Reg = v.Args[0].Reg()
+ }
+ return p
+}
+
+// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
+// It must be called immediately before emitting the actual CALL instruction,
+// since it emits PCDATA for the stack map at the call (calls are safe points).
+func (s *SSAGenState) PrepareCall(v *ssa.Value) {
+ idx := s.livenessMap.Get(v)
+ if !idx.StackMapValid() {
+ // See Liveness.hasStackMap.
+ if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
+ Fatalf("missing stack map index for %v", v.LongString())
+ }
+ }
+
+ call, ok := v.Aux.(*ssa.AuxCall)
+
+ if ok && call.Fn == Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the CALL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction byte before the return PC.
+ // To avoid that being an unrelated instruction,
+ // insert an actual hardware NOP that will have the right line number.
+ // This is different from obj.ANOP, which is a virtual no-op
+ // that doesn't make it into the instruction stream.
+ thearch.Ginsnopdefer(s.pp)
+ }
+
+ if ok {
+ // Record call graph information for nowritebarrierrec
+ // analysis.
+ if nowritebarrierrecCheck != nil {
+ nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos)
+ }
+ }
+
+ if s.maxarg < v.AuxInt {
+ s.maxarg = v.AuxInt
+ }
+}
+
+// UseArgs records the fact that an instruction needs a certain amount of
+// callee args space for its use.
+func (s *SSAGenState) UseArgs(n int64) {
+ if s.maxarg < n {
+ s.maxarg = n
+ }
+}
+
+// fieldIdx finds the index of the field referred to by the ODOT node n.
+func fieldIdx(n *Node) int {
+ t := n.Left.Type
+ f := n.Sym
+ if !t.IsStruct() {
+ panic("ODOT's LHS is not a struct")
+ }
+
+ var i int
+ for _, t1 := range t.Fields().Slice() {
+ if t1.Sym != f {
+ i++
+ continue
+ }
+ if t1.Offset != n.Xoffset {
+ panic("field offset doesn't match")
+ }
+ return i
+ }
+ panic(fmt.Sprintf("can't find field in expr %v\n", n))
+
+ // TODO: keep the result of this function somewhere in the ODOT Node
+ // so we don't have to recompute it each time we need it.
+}
+
+// ssafn holds frontend information about a function that the backend is processing.
+// It also exports a bunch of compiler services for the ssa backend.
+type ssafn struct {
+ curfn *Node
+ strings map[string]*obj.LSym // map from constant string to data symbols
+ scratchFpMem *Node // temp for floating point register / memory moves on some architectures
+ stksize int64 // stack size for current frame
+ stkptrsize int64 // prefix of stack containing pointers
+ log bool // print ssa debug to stdout
+}
+
+// StringData returns a symbol which
+// is the data component of a global string constant containing s.
+func (e *ssafn) StringData(s string) *obj.LSym {
+ if aux, ok := e.strings[s]; ok {
+ return aux
+ }
+ if e.strings == nil {
+ e.strings = make(map[string]*obj.LSym)
+ }
+ data := stringsym(e.curfn.Pos, s)
+ e.strings[s] = data
+ return data
+}
+
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
+ n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+ return n
+}
+
+func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+ ptrType := types.NewPtr(types.Types[TUINT8])
+ lenType := types.Types[TINT]
+ // Split this string up into two separate variables.
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ return p, l
+}
+
+func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+ n := name.N.(*Node)
+ u := types.Types[TUINTPTR]
+ t := types.NewPtr(types.Types[TUINT8])
+ // Split this interface up into two separate variables.
+ f := ".itab"
+ if n.Type.IsEmptyInterface() {
+ f = ".type"
+ }
+ c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
+ d := e.SplitSlot(&name, ".data", u.Size(), t)
+ return c, d
+}
+
+func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
+ ptrType := types.NewPtr(name.Type.Elem())
+ lenType := types.Types[TINT]
+ p := e.SplitSlot(&name, ".ptr", 0, ptrType)
+ l := e.SplitSlot(&name, ".len", ptrType.Size(), lenType)
+ c := e.SplitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+ return p, l, c
+}
+
+func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+ s := name.Type.Size() / 2
+ var t *types.Type
+ if s == 8 {
+ t = types.Types[TFLOAT64]
+ } else {
+ t = types.Types[TFLOAT32]
+ }
+ r := e.SplitSlot(&name, ".real", 0, t)
+ i := e.SplitSlot(&name, ".imag", t.Size(), t)
+ return r, i
+}
+
+func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
+ var t *types.Type
+ if name.Type.IsSigned() {
+ t = types.Types[TINT32]
+ } else {
+ t = types.Types[TUINT32]
+ }
+ if thearch.LinkArch.ByteOrder == binary.BigEndian {
+ return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
+ }
+ return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[TUINT32])
+}
+
+func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
+ st := name.Type
+ // Note: the _ field may appear several times. But
+ // have no fear, identically-named but distinct Autos are
+ // ok, albeit maybe confusing for a debugger.
+ return e.SplitSlot(&name, "."+st.FieldName(i), st.FieldOff(i), st.FieldType(i))
+}
+
+func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
+ n := name.N.(*Node)
+ at := name.Type
+ if at.NumElem() != 1 {
+ e.Fatalf(n.Pos, "bad array size")
+ }
+ et := at.Elem()
+ return e.SplitSlot(&name, "[0]", 0, et)
+}
+
+func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
+ return itabsym(it, offset)
+}
+
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+ node := parent.N.(*Node)
+
+ if node.Class() != PAUTO || node.Name.Addrtaken() {
+ // addressed things and non-autos retain their parents (i.e., cannot truly be split)
+ return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+ }
+
+ s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: localpkg}
+
+ n := &Node{
+ Name: new(Name),
+ Op: ONAME,
+ Pos: parent.N.(*Node).Pos,
+ }
+ n.Orig = n
+
+ s.Def = asTypesNode(n)
+ asNode(s.Def).Name.SetUsed(true)
+ n.Sym = s
+ n.Type = t
+ n.SetClass(PAUTO)
+ n.Esc = EscNever
+ n.Name.Curfn = e.curfn
+ e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
+ dowidth(t)
+ return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
+}
+
+func (e *ssafn) CanSSA(t *types.Type) bool {
+ return canSSAType(t)
+}
+
+func (e *ssafn) Line(pos src.XPos) string {
+ return linestr(pos)
+}
+
+// Logf logs a message from the compiler.
+func (e *ssafn) Logf(msg string, args ...interface{}) {
+ if e.log {
+ fmt.Printf(msg, args...)
+ }
+}
+
+func (e *ssafn) Log() bool {
+ return e.log
+}
+
+// Fatalf reports a compiler error and exits.
+func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
+ lineno = pos
+ nargs := append([]interface{}{e.curfn.funcname()}, args...)
+ Fatalf("'%s': "+msg, nargs...)
+}
+
+// Warnl reports a "warning", which is usually flag-triggered
+// logging output for the benefit of tests.
+func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
+ Warnl(pos, fmt_, args...)
+}
+
+func (e *ssafn) Debug_checknil() bool {
+ return Debug_checknil != 0
+}
+
+func (e *ssafn) UseWriteBarrier() bool {
+ return use_writebarrier
+}
+
+func (e *ssafn) Syslook(name string) *obj.LSym {
+ switch name {
+ case "goschedguarded":
+ return goschedguarded
+ case "writeBarrier":
+ return writeBarrier
+ case "gcWriteBarrier":
+ return gcWriteBarrier
+ case "typedmemmove":
+ return typedmemmove
+ case "typedmemclr":
+ return typedmemclr
+ }
+ e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
+ return nil
+}
+
+func (e *ssafn) SetWBPos(pos src.XPos) {
+ e.curfn.Func.setWBPos(pos)
+}
+
+func (e *ssafn) MyImportPath() string {
+ return myimportpath
+}
+
+func (n *Node) Typ() *types.Type {
+ return n.Type
+}
+func (n *Node) StorageClass() ssa.StorageClass {
+ switch n.Class() {
+ case PPARAM:
+ return ssa.ClassParam
+ case PPARAMOUT:
+ return ssa.ClassParamOut
+ case PAUTO:
+ return ssa.ClassAuto
+ default:
+ Fatalf("untranslatable storage class for %v: %s", n, n.Class())
+ return 0
+ }
+}
+
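+// clobberBase returns the expression that n fully occupies, peeling away
+// ODOTs of single-field structs and OINDEXes of single-element arrays.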
+func clobberBase(n *Node) *Node {
+ if n.Op == ODOT && n.Left.Type.NumFields() == 1 {
+ return clobberBase(n.Left)
+ }
+ if n.Op == OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
+ return clobberBase(n.Left)
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/gc/ssa_test.go
new file mode 100644
index 0000000..7f7c946
--- /dev/null
+++ b/src/cmd/compile/internal/gc/ssa_test.go
@@ -0,0 +1,191 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// runGenTest runs a test generator, then runs the generated test.
+// The generated test can fail either at compilation or at execution.
+// The environment variables ev are passed to the run of the generated test.
+func runGenTest(t *testing.T, filename, tmpname string, ev ...string) {
+ testenv.MustHaveGoRun(t)
+ gotool := testenv.GoToolPath(t)
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Command(gotool, "run", filepath.Join("testdata", filename))
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ // Write stdout into a temporary file
+ tmpdir, ok := ioutil.TempDir("", tmpname)
+ if ok != nil {
+ t.Fatalf("Failed to create temporary directory")
+ }
+ defer os.RemoveAll(tmpdir)
+
+ rungo := filepath.Join(tmpdir, "run.go")
+ ok = ioutil.WriteFile(rungo, stdout.Bytes(), 0600)
+ if ok != nil {
+ t.Fatalf("Failed to create temporary file " + rungo)
+ }
+
+ stdout.Reset()
+ stderr.Reset()
+ cmd = exec.Command(gotool, "run", "-gcflags=-d=ssa/check/on", rungo)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ cmd.Env = append(cmd.Env, ev...)
+ err := cmd.Run()
+ if err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ if s := stderr.String(); s != "" {
+ t.Errorf("Stderr = %s\nWant empty", s)
+ }
+ if s := stdout.String(); s != "" {
+ t.Errorf("Stdout = %s\nWant empty", s)
+ }
+}
+
+func TestGenFlowGraph(t *testing.T) {
+ if testing.Short() {
+ t.Skip("not run in short mode.")
+ }
+ runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp1")
+}
+
+// TestCode runs all the tests in the testdata directory as subtests.
+// These tests are special because we want to run them with different
+// compiler flags set (and thus they can't just be _test.go files in
+// this directory).
+func TestCode(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ gotool := testenv.GoToolPath(t)
+
+ // Make a temporary directory to work in.
+ tmpdir, err := ioutil.TempDir("", "TestCode")
+ if err != nil {
+ t.Fatalf("Failed to create temporary directory: %v", err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // Find all the test functions (and the files containing them).
+ var srcs []string // files containing Test functions
+ type test struct {
+ name string // TestFoo
+ usesFloat bool // might use float operations
+ }
+ var tests []test
+ files, err := ioutil.ReadDir("testdata")
+ if err != nil {
+ t.Fatalf("can't read testdata directory: %v", err)
+ }
+ for _, f := range files {
+ if !strings.HasSuffix(f.Name(), "_test.go") {
+ continue
+ }
+ text, err := ioutil.ReadFile(filepath.Join("testdata", f.Name()))
+ if err != nil {
+ t.Fatalf("can't read testdata/%s: %v", f.Name(), err)
+ }
+ fset := token.NewFileSet()
+ code, err := parser.ParseFile(fset, f.Name(), text, 0)
+ if err != nil {
+ t.Fatalf("can't parse testdata/%s: %v", f.Name(), err)
+ }
+ srcs = append(srcs, filepath.Join("testdata", f.Name()))
+ foundTest := false
+ for _, d := range code.Decls {
+ fd, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !strings.HasPrefix(fd.Name.Name, "Test") {
+ continue
+ }
+ if fd.Recv != nil {
+ continue
+ }
+ if fd.Type.Results != nil {
+ continue
+ }
+ if len(fd.Type.Params.List) != 1 {
+ continue
+ }
+ p := fd.Type.Params.List[0]
+ if len(p.Names) != 1 {
+ continue
+ }
+ s, ok := p.Type.(*ast.StarExpr)
+ if !ok {
+ continue
+ }
+ sel, ok := s.X.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+ base, ok := sel.X.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if base.Name != "testing" {
+ continue
+ }
+ if sel.Sel.Name != "T" {
+ continue
+ }
+ // Found a testing function.
+ tests = append(tests, test{name: fd.Name.Name, usesFloat: bytes.Contains(text, []byte("float"))})
+ foundTest = true
+ }
+ if !foundTest {
+ t.Fatalf("test file testdata/%s has no tests in it", f.Name())
+ }
+ }
+
+ flags := []string{""}
+ if runtime.GOARCH == "arm" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" {
+ flags = append(flags, ",softfloat")
+ }
+ for _, flag := range flags {
+ args := []string{"test", "-c", "-gcflags=-d=ssa/check/on" + flag, "-o", filepath.Join(tmpdir, "code.test")}
+ args = append(args, srcs...)
+ out, err := exec.Command(gotool, args...).CombinedOutput()
+ if err != nil || len(out) != 0 {
+ t.Fatalf("Build failed: %v\n%s\n", err, out)
+ }
+
+ // Now we have a test binary. Run it with all the tests as subtests of this one.
+ for _, test := range tests {
+ test := test
+ if flag == ",softfloat" && !test.usesFloat {
+ // No point in running the soft float version if the test doesn't use floats.
+ continue
+ }
+ t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) {
+ out, err := exec.Command(filepath.Join(tmpdir, "code.test"), "-test.run="+test.name).CombinedOutput()
+ if err != nil || string(out) != "PASS\n" {
+ t.Errorf("Failed:\n%s\n", out)
+ }
+ })
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
new file mode 100644
index 0000000..defefd7
--- /dev/null
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -0,0 +1,1918 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "os"
+ "runtime/debug"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+type Error struct {
+ pos src.XPos
+ msg string
+}
+
+var errors []Error
+
+// largeStack is info about a function whose stack frame is too large (rare).
+type largeStack struct {
+ locals int64
+ args int64
+ callee int64
+ pos src.XPos
+}
+
+var (
+ largeStackFramesMu sync.Mutex // protects largeStackFrames
+ largeStackFrames []largeStack
+)
+
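+// errorexit flushes pending errors, removes the output file if one was
+// specified, and exits with status 2.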
+func errorexit() {
+ flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ os.Exit(2)
+}
+
+func adderrorname(n *Node) {
+ if n.Op != ODOT {
+ return
+ }
+ old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
+ if len(errors) > 0 && errors[len(errors)-1].pos.Line() == n.Pos.Line() && errors[len(errors)-1].msg == old {
+ errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
+ }
+}
+
+func adderr(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ // Only add the position if we know the position.
+ // See issue golang.org/issue/11361.
+ if pos.IsKnown() {
+ msg = fmt.Sprintf("%v: %s", linestr(pos), msg)
+ }
+ errors = append(errors, Error{
+ pos: pos,
+ msg: msg + "\n",
+ })
+}
+
+// byPos sorts errors by source position.
+type byPos []Error
+
+func (x byPos) Len() int { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// flusherrors sorts errors seen so far by source position, prints them to stdout,
+// and empties the errors array.
+func flusherrors() {
+ Ctxt.Bso.Flush()
+ if len(errors) == 0 {
+ return
+ }
+ sort.Stable(byPos(errors))
+ for i, err := range errors {
+ if i == 0 || err.msg != errors[i-1].msg {
+ fmt.Printf("%s", err.msg)
+ }
+ }
+ errors = errors[:0]
+}
+
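+// hcrash deliberately crashes the compiler when Debug.h is set, after
+// flushing errors and removing the output file, so that a stack trace
+// points at the failure.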
+func hcrash() {
+ if Debug.h != 0 {
+ flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ var x *int
+ *x = 0
+ }
+}
+
+func linestr(pos src.XPos) string {
+ return Ctxt.OutermostPos(pos).Format(Debug.C == 0, Debug.L == 1)
+}
+
+// lasterror keeps track of the most recently issued error.
+// It is used to avoid multiple error messages on the same
+// line.
+var lasterror struct {
+ syntax src.XPos // source position of last syntax error
+ other src.XPos // source position of last non-syntax error
+ msg string // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+ p := Ctxt.PosTable.Pos(a)
+ q := Ctxt.PosTable.Pos(b)
+ return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+func yyerrorl(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ if strings.HasPrefix(msg, "syntax error") {
+ nsyntaxerrors++
+ // only one syntax error per line, no matter what error
+ if sameline(lasterror.syntax, pos) {
+ return
+ }
+ lasterror.syntax = pos
+ } else {
+ // only one of multiple equal non-syntax errors per line
+ // (flusherrors shows only one of them, so we filter duplicates
+ // here as best we can; they may not appear in order, and if we
+ // counted them all we might hit the error limit and exit early,
+ // with nothing to show for the errors that were reported.)
+ if sameline(lasterror.other, pos) && lasterror.msg == msg {
+ return
+ }
+ lasterror.other = pos
+ lasterror.msg = msg
+ }
+
+ adderr(pos, "%s", msg)
+
+ hcrash()
+ nerrors++
+ if nsavederrors+nerrors >= 10 && Debug.e == 0 {
+ flusherrors()
+ fmt.Printf("%v: too many errors\n", linestr(pos))
+ errorexit()
+ }
+}
+
+func yyerrorv(lang string, format string, args ...interface{}) {
+ what := fmt.Sprintf(format, args...)
+ yyerrorl(lineno, "%s requires %s or later (-lang was set to %s; check go.mod)", what, lang, flag_lang)
+}
+
+func yyerror(format string, args ...interface{}) {
+ yyerrorl(lineno, format, args...)
+}
+
+func Warn(fmt_ string, args ...interface{}) {
+ Warnl(lineno, fmt_, args...)
+}
+
+func Warnl(line src.XPos, fmt_ string, args ...interface{}) {
+ adderr(line, fmt_, args...)
+ if Debug.m != 0 {
+ flusherrors()
+ }
+}
+
+func Fatalf(fmt_ string, args ...interface{}) {
+ flusherrors()
+
+ if Debug_panic != 0 || nsavederrors+nerrors == 0 {
+ fmt.Printf("%v: internal compiler error: ", linestr(lineno))
+ fmt.Printf(fmt_, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if strings.HasPrefix(objabi.Version, "go") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://golang.org/issue/new\n")
+ } else {
+ // Not a release; dump a stack trace, too.
+ fmt.Println()
+ os.Stdout.Write(debug.Stack())
+ fmt.Println()
+ }
+ }
+
+ hcrash()
+ errorexit()
+}
+
+// hasUniquePos reports whether n has a unique position that can be
+// used for reporting error messages.
+//
+// It's primarily used to distinguish references to named objects,
+// whose Pos will point back to their declaration position rather than
+// their usage position.
+func hasUniquePos(n *Node) bool {
+ switch n.Op {
+ case ONAME, OPACK:
+ return false
+ case OLITERAL, OTYPE:
+ if n.Sym != nil {
+ return false
+ }
+ }
+
+ if !n.Pos.IsKnown() {
+ if Debug.K != 0 {
+ Warn("setlineno: unknown position (line 0)")
+ }
+ return false
+ }
+
+ return true
+}
+
+func setlineno(n *Node) src.XPos {
+ lno := lineno
+ if n != nil && hasUniquePos(n) {
+ lineno = n.Pos
+ }
+ return lno
+}
+
+func lookup(name string) *types.Sym {
+ return localpkg.Lookup(name)
+}
+
+// lookupN looks up the symbol starting with prefix and ending with
+// the decimal n. If prefix is too long, lookupN panics.
+func lookupN(prefix string, n int) *types.Sym {
+ var buf [20]byte // plenty long enough for all current users
+ copy(buf[:], prefix)
+ b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+ return localpkg.LookupBytes(b)
+}
+
+// autolabel generates a new Name node for use with
+// an automatically generated label.
+// prefix is a short mnemonic (e.g. ".s" for switch)
+// to help with debugging.
+// It should begin with "." to avoid conflicts with
+// user labels.
+func autolabel(prefix string) *types.Sym {
+ if prefix[0] != '.' {
+ Fatalf("autolabel prefix must start with '.', have %q", prefix)
+ }
+ fn := Curfn
+ if Curfn == nil {
+ Fatalf("autolabel outside function")
+ }
+ n := fn.Func.Label
+ fn.Func.Label++
+ return lookupN(prefix, int(n))
+}
+
+// find all the exported symbols in package opkg
+// and make them available in the current package
+func importdot(opkg *types.Pkg, pack *Node) {
+ n := 0
+ for _, s := range opkg.Syms {
+ if s.Def == nil {
+ continue
+ }
+ if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+ continue
+ }
+ s1 := lookup(s.Name)
+ if s1.Def != nil {
+ pkgerror := fmt.Sprintf("during import %q", opkg.Path)
+ redeclare(lineno, s1, pkgerror)
+ continue
+ }
+
+ s1.Def = s.Def
+ s1.Block = s.Block
+ if asNode(s1.Def).Name == nil {
+ Dump("s1def", asNode(s1.Def))
+ Fatalf("missing Name")
+ }
+ asNode(s1.Def).Name.Pack = pack
+ s1.Origpkg = opkg
+ n++
+ }
+
+ if n == 0 {
+ // can't possibly be used - there were no symbols
+ yyerrorl(pack.Pos, "imported and not used: %q", opkg.Path)
+ }
+}
+
+func nod(op Op, nleft, nright *Node) *Node {
+ return nodl(lineno, op, nleft, nright)
+}
+
+func nodl(pos src.XPos, op Op, nleft, nright *Node) *Node {
+ var n *Node
+ switch op {
+ case OCLOSURE, ODCLFUNC:
+ var x struct {
+ n Node
+ f Func
+ }
+ n = &x.n
+ n.Func = &x.f
+ case ONAME:
+ Fatalf("use newname instead")
+ case OLABEL, OPACK:
+ var x struct {
+ n Node
+ m Name
+ }
+ n = &x.n
+ n.Name = &x.m
+ default:
+ n = new(Node)
+ }
+ n.Op = op
+ n.Left = nleft
+ n.Right = nright
+ n.Pos = pos
+ n.Xoffset = BADWIDTH
+ n.Orig = n
+ return n
+}
+
+// newname returns a new ONAME Node associated with symbol s.
+func newname(s *types.Sym) *Node {
+ n := newnamel(lineno, s)
+ n.Name.Curfn = Curfn
+ return n
+}
+
+// newnamel returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting n.Name.Curfn.
+func newnamel(pos src.XPos, s *types.Sym) *Node {
+ if s == nil {
+ Fatalf("newnamel nil")
+ }
+
+ var x struct {
+ n Node
+ m Name
+ p Param
+ }
+ n := &x.n
+ n.Name = &x.m
+ n.Name.Param = &x.p
+
+ n.Op = ONAME
+ n.Pos = pos
+ n.Orig = n
+
+ n.Sym = s
+ return n
+}
+
+// nodSym makes a Node with Op op and with the Left field set to left
+// and the Sym field set to sym. This is for ODOT and friends.
+func nodSym(op Op, left *Node, sym *types.Sym) *Node {
+ return nodlSym(lineno, op, left, sym)
+}
+
+// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
+// and the Sym field set to sym. This is for ODOT and friends.
+func nodlSym(pos src.XPos, op Op, left *Node, sym *types.Sym) *Node {
+ n := nodl(pos, op, left, nil)
+ n.Sym = sym
+ return n
+}
+
+// rawcopy returns a shallow copy of n.
+// Note: copy or sepcopy (rather than rawcopy) is usually the
+// correct choice (see comment with Node.copy, below).
+func (n *Node) rawcopy() *Node {
+ copy := *n
+ return &copy
+}
+
+// sepcopy returns a separate shallow copy of n, with the copy's
+// Orig pointing to itself.
+func (n *Node) sepcopy() *Node {
+ copy := *n
+ copy.Orig = &copy
+ return &copy
+}
+
+// copy returns shallow copy of n and adjusts the copy's Orig if
+// necessary: In general, if n.Orig points to itself, the copy's
+// Orig should point to itself as well. Otherwise, if n is modified,
+// the copy's Orig node appears modified, too, and then doesn't
+// represent the original node anymore.
+// (This caused the wrong complit Op to be used when printing error
+// messages; see issues #26855, #27765).
+func (n *Node) copy() *Node {
+ copy := *n
+ if n.Orig == n {
+ copy.Orig = &copy
+ }
+ return &copy
+}
+
+// methcmp sorts methods by symbol.
+type methcmp []*types.Field
+
+func (x methcmp) Len() int { return len(x) }
+func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+
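+// nodintconst returns a literal node representing the integer constant v.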
+func nodintconst(v int64) *Node {
+ u := new(Mpint)
+ u.SetInt64(v)
+ return nodlit(Val{u})
+}
+
+func nodnil() *Node {
+ return nodlit(Val{new(NilVal)})
+}
+
+func nodbool(b bool) *Node {
+ return nodlit(Val{b})
+}
+
+func nodstr(s string) *Node {
+ return nodlit(Val{s})
+}
+
+// treecopy recursively copies n, with the exception of
+// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
+// If pos.IsKnown(), it sets the source position of newly
+// allocated nodes to pos.
+func treecopy(n *Node, pos src.XPos) *Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op {
+ default:
+ m := n.sepcopy()
+ m.Left = treecopy(n.Left, pos)
+ m.Right = treecopy(n.Right, pos)
+ m.List.Set(listtreecopy(n.List.Slice(), pos))
+ if pos.IsKnown() {
+ m.Pos = pos
+ }
+ if m.Name != nil && n.Op != ODCLFIELD {
+ Dump("treecopy", n)
+ Fatalf("treecopy Name")
+ }
+ return m
+
+ case OPACK:
+ // OPACK nodes are never valid in const value declarations,
+ // but allow them like any other declared symbol to avoid
+ // crashing (golang.org/issue/11361).
+ fallthrough
+
+ case ONAME, ONONAME, OLITERAL, OTYPE:
+ return n
+
+ }
+}
+
+// isNil reports whether n represents the universal untyped zero value "nil".
+func (n *Node) isNil() bool {
+ // Check n.Orig because constant propagation may produce typed nil constants,
+ // which don't exist in the Go spec.
+ return Isconst(n.Orig, CTNIL)
+}
+
+func isptrto(t *types.Type, et types.EType) bool {
+ if t == nil {
+ return false
+ }
+ if !t.IsPtr() {
+ return false
+ }
+ t = t.Elem()
+ if t == nil {
+ return false
+ }
+ if t.Etype != et {
+ return false
+ }
+ return true
+}
+
+func (n *Node) isBlank() bool {
+ if n == nil {
+ return false
+ }
+ return n.Sym.IsBlank()
+}
+
+// methtype returns the underlying type, if any,
+// that owns methods with receiver parameter t.
+// The result is either a named type or an anonymous struct.
+func methtype(t *types.Type) *types.Type {
+ if t == nil {
+ return nil
+ }
+
+ // Strip away pointer if it's there.
+ if t.IsPtr() {
+ if t.Sym != nil {
+ return nil
+ }
+ t = t.Elem()
+ if t == nil {
+ return nil
+ }
+ }
+
+ // Must be a named type or anonymous struct.
+ if t.Sym == nil && !t.IsStruct() {
+ return nil
+ }
+
+ // Check types.
+ if issimple[t.Etype] {
+ return t
+ }
+ switch t.Etype {
+ case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+ return t
+ }
+ return nil
+}
+
+// Is type src assignment compatible to type dst?
+// If so, return op code to use in conversion.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignop(src, dst *types.Type) (Op, string) {
+ if src == dst {
+ return OCONVNOP, ""
+ }
+ if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
+ return OXXX, ""
+ }
+
+ // 1. src type is identical to dst.
+ if types.Identical(src, dst) {
+ return OCONVNOP, ""
+ }
+
+ // 2. src and dst have identical underlying types
+ // and either src or dst is not a named type or
+ // both are empty interface types.
+ // For assignable but different non-empty interface types,
+ // we want to recompute the itab. Recomputing the itab ensures
+ // that itabs are unique (thus an interface with a compile-time
+ // type I has an itab with interface type I).
+ if types.Identical(src.Orig, dst.Orig) {
+ if src.IsEmptyInterface() {
+ // Conversion between two empty interfaces
+ // requires no code.
+ return OCONVNOP, ""
+ }
+ if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
+ // Conversion between two types, at least one unnamed,
+ // needs no conversion. The exception is nonempty interfaces
+ // which need to have their itab updated.
+ return OCONVNOP, ""
+ }
+ }
+
+ // 3. dst is an interface type and src implements dst.
+ if dst.IsInterface() && src.Etype != TNIL {
+ var missing, have *types.Field
+ var ptr int
+ if implements(src, dst, &missing, &have, &ptr) {
+ return OCONVIFACE, ""
+ }
+
+ // We'll have complained about this method anyway; suppress spurious messages.
+ if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
+ return OCONVIFACE, ""
+ }
+
+ var why string
+ if isptrto(src, TINTER) {
+ why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+ } else if have != nil && have.Sym == missing.Sym {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+ } else if have != nil {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+ }
+
+ return OXXX, why
+ }
+
+ if isptrto(dst, TINTER) {
+ why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+ return OXXX, why
+ }
+
+ if src.IsInterface() && dst.Etype != TBLANK {
+ var missing, have *types.Field
+ var ptr int
+ var why string
+ if implements(dst, src, &missing, &have, &ptr) {
+ why = ": need type assertion"
+ }
+ return OXXX, why
+ }
+
+ // 4. src is a bidirectional channel value, dst is a channel type,
+ // src and dst have identical element types, and
+ // either src or dst is not a named type.
+ if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
+ return OCONVNOP, ""
+ }
+ }
+
+ // 5. src is the predeclared identifier nil and dst is a nillable type.
+ if src.Etype == TNIL {
+ switch dst.Etype {
+ case TPTR,
+ TFUNC,
+ TMAP,
+ TCHAN,
+ TINTER,
+ TSLICE:
+ return OCONVNOP, ""
+ }
+ }
+
+ // 6. rule about untyped constants - already converted by defaultlit.
+
+ // 7. Any typed value can be assigned to the blank identifier.
+ if dst.Etype == TBLANK {
+ return OCONVNOP, ""
+ }
+
+ return OXXX, ""
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+// srcConstant indicates whether the value of type src is a constant.
+func convertop(srcConstant bool, src, dst *types.Type) (Op, string) {
+ if src == dst {
+ return OCONVNOP, ""
+ }
+ if src == nil || dst == nil {
+ return OXXX, ""
+ }
+
+ // Conversions from regular to go:notinheap are not allowed
+ // (unless it's unsafe.Pointer). These are runtime-specific
+ // rules.
+ // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
+ if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+ return OXXX, why
+ }
+ // (b) Disallow string to []T where T is go:notinheap.
+ if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+ return OXXX, why
+ }
+
+ // 1. src can be assigned to dst.
+ op, why := assignop(src, dst)
+ if op != OXXX {
+ return op, why
+ }
+
+ // The rules for interfaces are no different in conversions
+ // than assignments. If interfaces are involved, stop now
+ // with the good message from assignop.
+ // Otherwise clear the error.
+ if src.IsInterface() || dst.IsInterface() {
+ return OXXX, why
+ }
+
+ // 2. Ignoring struct tags, src and dst have identical underlying types.
+ if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
+ return OCONVNOP, ""
+ }
+
+ // 3. src and dst are unnamed pointer types and, ignoring struct tags,
+ // their base types have identical underlying types.
+ if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
+ if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
+ return OCONVNOP, ""
+ }
+ }
+
+ // 4. src and dst are both integer or floating point types.
+ if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
+ if simtype[src.Etype] == simtype[dst.Etype] {
+ return OCONVNOP, ""
+ }
+ return OCONV, ""
+ }
+
+ // 5. src and dst are both complex types.
+ if src.IsComplex() && dst.IsComplex() {
+ if simtype[src.Etype] == simtype[dst.Etype] {
+ return OCONVNOP, ""
+ }
+ return OCONV, ""
+ }
+
+ // Special case for constant conversions: any numeric
+ // conversion is potentially okay. We'll validate further
+ // within evconst. See #38117.
+ if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
+ return OCONV, ""
+ }
+
+ // 6. src is an integer or has type []byte or []rune
+ // and dst is a string type.
+ if src.IsInteger() && dst.IsString() {
+ return ORUNESTR, ""
+ }
+
+ if src.IsSlice() && dst.IsString() {
+ if src.Elem().Etype == types.Bytetype.Etype {
+ return OBYTES2STR, ""
+ }
+ if src.Elem().Etype == types.Runetype.Etype {
+ return ORUNES2STR, ""
+ }
+ }
+
+ // 7. src is a string and dst is []byte or []rune.
+ // String to slice.
+ if src.IsString() && dst.IsSlice() {
+ if dst.Elem().Etype == types.Bytetype.Etype {
+ return OSTR2BYTES, ""
+ }
+ if dst.Elem().Etype == types.Runetype.Etype {
+ return OSTR2RUNES, ""
+ }
+ }
+
+ // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+ if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
+ return OCONVNOP, ""
+ }
+
+ // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+ if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
+ return OCONVNOP, ""
+ }
+
+ // src is map and dst is a pointer to corresponding hmap.
+ // This rule is needed for the implementation detail that
+ // go gc maps are implemented as a pointer to a hmap struct.
+ if src.Etype == TMAP && dst.IsPtr() &&
+ src.MapType().Hmap == dst.Elem() {
+ return OCONVNOP, ""
+ }
+
+ return OXXX, ""
+}
+
+func assignconv(n *Node, t *types.Type, context string) *Node {
+ return assignconvfn(n, t, func() string { return context })
+}
+
+// Convert node n for assignment to type t.
+func assignconvfn(n *Node, t *types.Type, context func() string) *Node {
+ if n == nil || n.Type == nil || n.Type.Broke() {
+ return n
+ }
+
+ if t.Etype == TBLANK && n.Type.Etype == TNIL {
+ yyerror("use of untyped nil")
+ }
+
+ n = convlit1(n, t, false, context)
+ if n.Type == nil {
+ return n
+ }
+ if t.Etype == TBLANK {
+ return n
+ }
+
+ // Convert ideal bool from comparison to plain bool
+ // if the next step is non-bool (like interface{}).
+ if n.Type == types.UntypedBool && !t.IsBoolean() {
+ if n.Op == ONAME || n.Op == OLITERAL {
+ r := nod(OCONVNOP, n, nil)
+ r.Type = types.Types[TBOOL]
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ n = r
+ }
+ }
+
+ if types.Identical(n.Type, t) {
+ return n
+ }
+
+ op, why := assignop(n.Type, t)
+ if op == OXXX {
+ yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
+ op = OCONV
+ }
+
+ r := nod(op, n, nil)
+ r.Type = t
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ r.Orig = n.Orig
+ return r
+}
+
+// IsMethod reports whether n is a method.
+// n must be a function or a method.
+func (n *Node) IsMethod() bool {
+ return n.Type.Recv() != nil
+}
+
+// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
+// n must be a slice expression. max is nil if n is a simple slice expression.
+func (n *Node) SliceBounds() (low, high, max *Node) {
+ if n.List.Len() == 0 {
+ return nil, nil, nil
+ }
+
+ switch n.Op {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ s := n.List.Slice()
+ return s[0], s[1], nil
+ case OSLICE3, OSLICE3ARR:
+ s := n.List.Slice()
+ return s[0], s[1], s[2]
+ }
+ Fatalf("SliceBounds op %v: %v", n.Op, n)
+ return nil, nil, nil
+}
+
+// SetSliceBounds sets n's slice bounds, where n is a slice expression.
+// n must be a slice expression. If max is non-nil, n must be a full slice expression.
+func (n *Node) SetSliceBounds(low, high, max *Node) {
+ switch n.Op {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ if max != nil {
+ Fatalf("SetSliceBounds %v given three bounds", n.Op)
+ }
+ s := n.List.Slice()
+ if s == nil {
+ if low == nil && high == nil {
+ return
+ }
+ n.List.Set2(low, high)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ return
+ case OSLICE3, OSLICE3ARR:
+ s := n.List.Slice()
+ if s == nil {
+ if low == nil && high == nil && max == nil {
+ return
+ }
+ n.List.Set3(low, high, max)
+ return
+ }
+ s[0] = low
+ s[1] = high
+ s[2] = max
+ return
+ }
+ Fatalf("SetSliceBounds op %v: %v", n.Op, n)
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+ switch o {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ return false
+ case OSLICE3, OSLICE3ARR:
+ return true
+ }
+ Fatalf("IsSlice3 op %v", o)
+ return false
+}
+
+// backingArrayPtrLen extracts the pointer and length from a slice or string.
+// This constructs two nodes referring to n, so n must be a cheapexpr.
+func (n *Node) backingArrayPtrLen() (ptr, len *Node) {
+ var init Nodes
+ c := cheapexpr(n, &init)
+ if c != n || init.Len() != 0 {
+ Fatalf("backingArrayPtrLen not cheap: %v", n)
+ }
+ ptr = nod(OSPTR, n, nil)
+ if n.Type.IsString() {
+ ptr.Type = types.Types[TUINT8].PtrTo()
+ } else {
+ ptr.Type = n.Type.Elem().PtrTo()
+ }
+ len = nod(OLEN, n, nil)
+ len.Type = types.Types[TINT]
+ return ptr, len
+}
+
+// labeledControl returns the control flow Node (for, switch, select)
+// associated with the label n, if any.
+func (n *Node) labeledControl() *Node {
+ if n.Op != OLABEL {
+ Fatalf("labeledControl %v", n.Op)
+ }
+ ctl := n.Name.Defn
+ if ctl == nil {
+ return nil
+ }
+ switch ctl.Op {
+ case OFOR, OFORUNTIL, OSWITCH, OSELECT:
+ return ctl
+ }
+ return nil
+}
+
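+// syslook returns the declared Node for the runtime function or variable
+// with the given name; it is a fatal error if the name cannot be found.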
+func syslook(name string) *Node {
+ s := Runtimepkg.Lookup(name)
+ if s == nil || s.Def == nil {
+ Fatalf("syslook: can't find runtime.%s", name)
+ }
+ return asNode(s.Def)
+}
+
+// typehash computes a hash value for type t to use in type switch statements.
+func typehash(t *types.Type) uint32 {
+ p := t.LongString()
+
+ // Using MD5 is overkill, but reduces accidental collisions.
+ h := md5.Sum([]byte(p))
+ return binary.LittleEndian.Uint32(h[:4])
+}
+
+// updateHasCall checks whether expression n contains any function
+// calls and sets the n.HasCall flag if so.
+func updateHasCall(n *Node) {
+ if n == nil {
+ return
+ }
+ n.SetHasCall(calcHasCall(n))
+}
+
+func calcHasCall(n *Node) bool {
+ if n.Ninit.Len() != 0 {
+ // TODO(mdempsky): This seems overly conservative.
+ return true
+ }
+
+ switch n.Op {
+ case OLITERAL, ONAME, OTYPE:
+ if n.HasCall() {
+ Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
+ }
+ return false
+ case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
+ return true
+ case OANDAND, OOROR:
+ // hard with instrumented code
+ if instrumenting {
+ return true
+ }
+ case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
+ ODEREF, ODOTPTR, ODOTTYPE, ODIV, OMOD:
+ // These ops might panic, make sure they are done
+ // before we start marshaling args for a call. See issue 16760.
+ return true
+
+ // When using soft-float, these ops might be rewritten to function calls
+ // so we ensure they are evaluated first.
+ case OADD, OSUB, ONEG, OMUL:
+ if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) {
+ return true
+ }
+ case OLT, OEQ, ONE, OLE, OGE, OGT:
+ if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) {
+ return true
+ }
+ case OCONV:
+ if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) {
+ return true
+ }
+ }
+
+ if n.Left != nil && n.Left.HasCall() {
+ return true
+ }
+ if n.Right != nil && n.Right.HasCall() {
+ return true
+ }
+ return false
+}
+
+func badtype(op Op, tl, tr *types.Type) {
+ var s string
+ if tl != nil {
+ s += fmt.Sprintf("\n\t%v", tl)
+ }
+ if tr != nil {
+ s += fmt.Sprintf("\n\t%v", tr)
+ }
+
+ // common mistake: *struct and *interface.
+ if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
+ if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
+ s += "\n\t(*struct vs *interface)"
+ } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
+ s += "\n\t(*interface vs *struct)"
+ }
+ }
+
+ yyerror("illegal types for operand: %v%s", op, s)
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+func brcom(op Op) Op {
+ switch op {
+ case OEQ:
+ return ONE
+ case ONE:
+ return OEQ
+ case OLT:
+ return OGE
+ case OGT:
+ return OLE
+ case OLE:
+ return OGT
+ case OGE:
+ return OLT
+ }
+ Fatalf("brcom: no com for %v\n", op)
+ return op
+}
+
+// brrev returns reverse(op).
+// For example, brrev(<) is >.
+func brrev(op Op) Op {
+ switch op {
+ case OEQ:
+ return OEQ
+ case ONE:
+ return ONE
+ case OLT:
+ return OGT
+ case OGT:
+ return OLT
+ case OLE:
+ return OGE
+ case OGE:
+ return OLE
+ }
+ Fatalf("brrev: no rev for %v\n", op)
+ return op
+}
+
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
+func safeexpr(n *Node, init *Nodes) *Node {
+ if n == nil {
+ return nil
+ }
+
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
+ }
+
+ switch n.Op {
+ case ONAME, OLITERAL:
+ return n
+
+ case ODOT, OLEN, OCAP:
+ l := safeexpr(n.Left, init)
+ if l == n.Left {
+ return n
+ }
+ r := n.copy()
+ r.Left = l
+ r = typecheck(r, ctxExpr)
+ r = walkexpr(r, init)
+ return r
+
+ case ODOTPTR, ODEREF:
+ l := safeexpr(n.Left, init)
+ if l == n.Left {
+ return n
+ }
+ a := n.copy()
+ a.Left = l
+ a = walkexpr(a, init)
+ return a
+
+ case OINDEX, OINDEXMAP:
+ l := safeexpr(n.Left, init)
+ r := safeexpr(n.Right, init)
+ if l == n.Left && r == n.Right {
+ return n
+ }
+ a := n.copy()
+ a.Left = l
+ a.Right = r
+ a = walkexpr(a, init)
+ return a
+
+ case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
+ if isStaticCompositeLiteral(n) {
+ return n
+ }
+ }
+
+ // make a copy; must not be used as an lvalue
+ if islvalue(n) {
+ Fatalf("missing lvalue case in safeexpr: %v", n)
+ }
+ return cheapexpr(n, init)
+}
+
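+// copyexpr evaluates n into a fresh temporary of type t, appending the
+// assignment to init, and returns the temporary.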
+func copyexpr(n *Node, t *types.Type, init *Nodes) *Node {
+ l := temp(t)
+ a := nod(OAS, l, n)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+ return l
+}
+
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
+func cheapexpr(n *Node, init *Nodes) *Node {
+ switch n.Op {
+ case ONAME, OLITERAL:
+ return n
+ }
+
+ return copyexpr(n, n.Type, init)
+}
+
+// Code to resolve elided DOTs in embedded types.
+
+// A Dlist stores a pointer to a TFIELD Type embedded within
+// a TSTRUCT or TINTER Type.
+type Dlist struct {
+ field *types.Field
+}
+
+// dotlist is used by adddot1 to record the path of embedded fields
+// used to access a target field or method.
+// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
+var dotlist = make([]Dlist, 10)
+
+// lookdot0 returns the number of fields or methods named s associated
+// with Type t. If exactly one exists, it will be returned in *save
+// (if save is not nil).
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ c := 0
+ if u.IsStruct() || u.IsInterface() {
+ for _, f := range u.Fields().Slice() {
+ if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ u = t
+ if t.Sym != nil && t.IsPtr() && !t.Elem().IsPtr() {
+ // If t is a defined pointer type, then x.m is shorthand for (*x).m.
+ u = t.Elem()
+ }
+ u = methtype(u)
+ if u != nil {
+ for _, f := range u.Methods().Slice() {
+ if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ return c
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order. If none exist, more will indicate whether t contains any
+// embedded fields at depth d, so callers can decide whether to retry at
+// a greater depth.
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+ defer t.SetRecur(false)
+
+ var u *types.Type
+ d--
+ if d < 0 {
+ // We've reached our target depth. If t has any fields/methods
+ // named s, then we're done. Otherwise, we still need to check
+ // below for embedded fields.
+ c = lookdot0(s, t, save, ignorecase)
+ if c != 0 {
+ return c, false
+ }
+ }
+
+ u = t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+ if !u.IsStruct() && !u.IsInterface() {
+ return c, false
+ }
+
+ for _, f := range u.Fields().Slice() {
+ if f.Embedded == 0 || f.Sym == nil {
+ continue
+ }
+ if d < 0 {
+ // Found an embedded field at target depth.
+ return c, true
+ }
+ a, more1 := adddot1(s, f.Type, d, save, ignorecase)
+ if a != 0 && c == 0 {
+ dotlist[d].field = f
+ }
+ c += a
+ if more1 {
+ more = true
+ }
+ }
+
+ return c, more
+}
+
+// dotpath computes the unique shortest explicit selector path to fully qualify
+// a selection expression x.f, where x is of type t and f is the symbol s.
+// If no such path exists, dotpath returns nil.
+// If there are multiple shortest paths to the same depth, ambig is true.
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []Dlist, ambig bool) {
+ // The embedding of types within structs imposes a tree structure onto
+ // types: structs parent the types they embed, and types parent their
+ // fields or methods. Our goal here is to find the shortest path to
+ // a field or method named s in the subtree rooted at t. To accomplish
+ // that, we iteratively perform depth-first searches of increasing depth
+ // until we either find the named field/method or exhaust the tree.
+ for d := 0; ; d++ {
+ if d > len(dotlist) {
+ dotlist = append(dotlist, Dlist{})
+ }
+ if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
+ return dotlist[:d], false
+ } else if c > 1 {
+ return nil, true
+ } else if !more {
+ return nil, false
+ }
+ }
+}
+
+// adddot rewrites the selector expression n (of the form T.field) to insert
+// the implicit ODOTs for embedded fields that give the shortest unique
+// addressing of the target field or method.
+func adddot(n *Node) *Node {
+ n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ if n.Left.Diag() {
+ n.SetDiag(true)
+ }
+ t := n.Left.Type
+ if t == nil {
+ return n
+ }
+
+ if n.Left.Op == OTYPE {
+ return n
+ }
+
+ s := n.Sym
+ if s == nil {
+ return n
+ }
+
+ switch path, ambig := dotpath(s, t, nil, false); {
+ case path != nil:
+ // rebuild elided dots
+ for c := len(path) - 1; c >= 0; c-- {
+ n.Left = nodSym(ODOT, n.Left, path[c].field.Sym)
+ n.Left.SetImplicit(true)
+ }
+ case ambig:
+ yyerror("ambiguous selector %v", n)
+ n.Left = nil
+ }
+
+ return n
+}
+
+// Code to help generate trampoline functions for methods on embedded
+// types. These are approximately the same as the corresponding adddot
+// routines, except that they expect to be called with unique tasks and
+// they return the actual methods.
+
+type Symlink struct {
+ field *types.Field
+}
+
+var slist []Symlink
+
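+// expand0 appends to slist the methods of t not yet marked unique:
+// interface methods if t is an interface, otherwise the methods of t's
+// method type, marking each symbol as it is recorded.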
+func expand0(t *types.Type) {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsInterface() {
+ for _, f := range u.Fields().Slice() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, Symlink{field: f})
+ }
+
+ return
+ }
+
+ u = methtype(t)
+ if u != nil {
+ for _, f := range u.Methods().Slice() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, Symlink{field: f})
+ }
+ }
+}
+
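+// expand1 walks t and its embedded fields, calling expand0 on every type
+// reached except the top-level one, and uses the Recur flag to avoid cycles.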
+func expand1(t *types.Type, top bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+
+ if !top {
+ expand0(t)
+ }
+
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsStruct() || u.IsInterface() {
+ for _, f := range u.Fields().Slice() {
+ if f.Embedded == 0 {
+ continue
+ }
+ if f.Sym == nil {
+ continue
+ }
+ expand1(f.Type, false)
+ }
+ }
+
+ t.SetRecur(false)
+}
+
+func expandmeth(t *types.Type) {
+ if t == nil || t.AllMethods().Len() != 0 {
+ return
+ }
+
+ // mark top-level method symbols
+ // so that expand1 doesn't consider them.
+ for _, f := range t.Methods().Slice() {
+ f.Sym.SetUniq(true)
+ }
+
+ // generate all reachable methods
+ slist = slist[:0]
+ expand1(t, true)
+
+ // check each method to be uniquely reachable
+ var ms []*types.Field
+ for i, sl := range slist {
+ slist[i].field = nil
+ sl.field.Sym.SetUniq(false)
+
+ var f *types.Field
+ path, _ := dotpath(sl.field.Sym, t, &f, false)
+ if path == nil {
+ continue
+ }
+
+		// dotpath may have dug out arbitrary fields; we only want methods.
+ if !f.IsMethod() {
+ continue
+ }
+
+ // add it to the base type method list
+ f = f.Copy()
+ f.Embedded = 1 // needs a trampoline
+ for _, d := range path {
+ if d.field.Type.IsPtr() {
+ f.Embedded = 2
+ break
+ }
+ }
+ ms = append(ms, f)
+ }
+
+ for _, f := range t.Methods().Slice() {
+ f.Sym.SetUniq(false)
+ }
+
+ ms = append(ms, t.Methods().Slice()...)
+ sort.Sort(methcmp(ms))
+ t.AllMethods().Set(ms)
+}
+
+// Given funarg struct list, return list of ODCLFIELD Node fn args.
+func structargs(tl *types.Type, mustname bool) []*Node {
+ var args []*Node
+ gen := 0
+ for _, t := range tl.Fields().Slice() {
+ s := t.Sym
+ if mustname && (s == nil || s.Name == "_") {
+ // invent a name so that we can refer to it in the trampoline
+ s = lookupN(".anon", gen)
+ gen++
+ }
+ a := symfield(s, t.Type)
+ a.Pos = t.Pos
+ a.SetIsDDD(t.IsDDD())
+ args = append(args, a)
+ }
+
+ return args
+}
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+// func (t T) M() {
+// ...
+// }
+//
+// already exists; this function generates
+//
+// func (u U) M() {
+// u.M()
+// }
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+// rcvr - U
+// method - M func (t T)(), a TFIELD type struct
+// newnam - the eventual mangled name of this function
+func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
+ if false && Debug.r != 0 {
+ fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
+ }
+
+ // Only generate (*T).M wrappers for T.M in T's own package.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
+ rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg {
+ return
+ }
+
+ // Only generate I.M wrappers for I in I's own package
+	// but keep doing it for error.Error (see issue #29304).
+ if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != localpkg && rcvr != types.Errortype {
+ return
+ }
+
+ lineno = autogeneratedPos
+ dclcontext = PEXTERN
+
+ tfn := nod(OTFUNC, nil, nil)
+ tfn.Left = namedfield(".this", rcvr)
+ tfn.List.Set(structargs(method.Type.Params(), true))
+ tfn.Rlist.Set(structargs(method.Type.Results(), false))
+
+ fn := dclfunc(newnam, tfn)
+ fn.Func.SetDupok(true)
+
+ nthis := asNode(tfn.Type.Recv().Nname)
+
+ methodrcvr := method.Type.Recv().Type
+
+ // generate nil pointer check for better error
+ if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ // generating wrapper from *T to T.
+ n := nod(OIF, nil, nil)
+ n.Left = nod(OEQ, nthis, nodnil())
+ call := nod(OCALL, syslook("panicwrap"), nil)
+ n.Nbody.Set1(call)
+ fn.Nbody.Append(n)
+ }
+
+ dot := adddot(nodSym(OXDOT, nthis, method.Sym))
+
+ // generate call
+ // It's not possible to use a tail call when dynamic linking on ppc64le. The
+ // bad scenario is when a local call is made to the wrapper: the wrapper will
+ // call the implementation, which might be in a different module and so set
+ // the TOC to the appropriate value for that module. But if it returns
+ // directly to the wrapper's caller, nothing will reset it to the correct
+ // value for that function.
+ if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) {
+ // generate tail call: adjust pointer receiver and jump to embedded method.
+ dot = dot.Left // skip final .M
+ // TODO(mdempsky): Remove dependency on dotlist.
+ if !dotlist[0].field.Type.IsPtr() {
+ dot = nod(OADDR, dot, nil)
+ }
+ as := nod(OAS, nthis, convnop(dot, rcvr))
+ fn.Nbody.Append(as)
+ fn.Nbody.Append(nodSym(ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
+ } else {
+ fn.Func.SetWrapper(true) // ignore frame for panic+recover matching
+ call := nod(OCALL, dot, nil)
+ call.List.Set(paramNnames(tfn.Type))
+ call.SetIsDDD(tfn.Type.IsVariadic())
+ if method.Type.NumResults() > 0 {
+ n := nod(ORETURN, nil, nil)
+ n.List.Set1(call)
+ call = n
+ }
+ fn.Nbody.Append(call)
+ }
+
+ if false && Debug.r != 0 {
+ dumplist("genwrapper body", fn.Nbody)
+ }
+
+ funcbody()
+ if debug_dclstack != 0 {
+ testdclstack()
+ }
+
+ fn = typecheck(fn, ctxStmt)
+
+ Curfn = fn
+ typecheckslice(fn.Nbody.Slice(), ctxStmt)
+
+ // Inline calls within (*T).M wrappers. This is safe because we only
+ // generate those wrappers within the same compilation unit as (T).M.
+ // TODO(mdempsky): Investigate why we can't enable this more generally.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
+ inlcalls(fn)
+ }
+ escapeFuncs([]*Node{fn}, false)
+
+ Curfn = nil
+ xtop = append(xtop, fn)
+}
+
+func paramNnames(ft *types.Type) []*Node {
+ args := make([]*Node, ft.NumParams())
+ for i, f := range ft.Params().FieldSlice() {
+ args[i] = asNode(f.Nname)
+ }
+ return args
+}
+
+func hashmem(t *types.Type) *Node {
+ sym := Runtimepkg.Lookup("memhash")
+
+ n := newname(sym)
+ setNodeNameFunc(n)
+ n.Type = functype(nil, []*Node{
+ anonfield(types.NewPtr(t)),
+ anonfield(types.Types[TUINTPTR]),
+ anonfield(types.Types[TUINTPTR]),
+ }, []*Node{
+ anonfield(types.Types[TUINTPTR]),
+ })
+ return n
+}
+
+func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
+ if t == nil {
+ return nil, false
+ }
+
+ path, ambig := dotpath(s, t, &m, ignorecase)
+ if path == nil {
+ if ambig {
+ yyerror("%v.%v is ambiguous", t, s)
+ }
+ return nil, false
+ }
+
+ for _, d := range path {
+ if d.field.Type.IsPtr() {
+ followptr = true
+ break
+ }
+ }
+
+ if !m.IsMethod() {
+ yyerror("%v.%v is a field, not a method", t, s)
+ return nil, followptr
+ }
+
+ return m, followptr
+}
+
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
+ t0 := t
+ if t == nil {
+ return false
+ }
+
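+	// implements assumes that both method lists are sorted consistently
+	// by symbol (t's AllMethods are sorted via expandmeth), so a single
+	// forward scan with the index i is enough to match each interface
+	// method against t's methods.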
+ if t.IsInterface() {
+ i := 0
+ tms := t.Fields().Slice()
+ for _, im := range iface.Fields().Slice() {
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename = nil
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ }
+
+ return true
+ }
+
+ t = methtype(t)
+ var tms []*types.Field
+ if t != nil {
+ expandmeth(t)
+ tms = t.AllMethods().Slice()
+ }
+ i := 0
+ for _, im := range iface.Fields().Slice() {
+ if im.Broke() {
+ continue
+ }
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename, _ = ifacelookdot(im.Sym, t, true)
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ followptr := tm.Embedded == 2
+
+ // if pointer receiver in method,
+ // the method does not exist for value types.
+ rcvr := tm.Type.Recv().Type
+ if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
+ if false && Debug.r != 0 {
+ yyerror("interface pointer mismatch")
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 1
+ return false
+ }
+ }
+
+ // We're going to emit an OCONVIFACE.
+ // Call itabname so that (t, iface)
+ // gets added to itabs early, which allows
+ // us to de-virtualize calls through this
+ // type/interface pair later. See peekitabs in reflect.go
+ if isdirectiface(t0) && !iface.IsEmptyInterface() {
+ itabname(t0, iface)
+ }
+ return true
+}
+
+func listtreecopy(l []*Node, pos src.XPos) []*Node {
+ var out []*Node
+ for _, n := range l {
+ out = append(out, treecopy(n, pos))
+ }
+ return out
+}
+
+func liststmt(l []*Node) *Node {
+ n := nod(OBLOCK, nil, nil)
+ n.List.Set(l)
+ if len(l) != 0 {
+ n.Pos = l[0].Pos
+ }
+ return n
+}
+
+func (l Nodes) asblock() *Node {
+ n := nod(OBLOCK, nil, nil)
+ n.List = l
+ if l.Len() != 0 {
+ n.Pos = l.First().Pos
+ }
+ return n
+}
+
+func ngotype(n *Node) *types.Sym {
+ if n.Type != nil {
+ return typenamesym(n.Type)
+ }
+ return nil
+}
+
+// The result of addinit MUST be assigned back to n, e.g.
+// n.Left = addinit(n.Left, init)
+func addinit(n *Node, init []*Node) *Node {
+ if len(init) == 0 {
+ return n
+ }
+ if n.mayBeShared() {
+ // Introduce OCONVNOP to hold init list.
+ n = nod(OCONVNOP, n, nil)
+ n.Type = n.Left.Type
+ n.SetTypecheck(1)
+ }
+
+ n.Ninit.Prepend(init...)
+ n.SetHasCall(true)
+ return n
+}
+
+// The linker uses the magic symbol prefixes "go." and "type."
+// Avoid potential confusion between import paths and symbols
+// by rejecting these reserved imports for now. Also, people
+// "can do weird things in GOPATH and we'd prefer they didn't
+// do _that_ weird thing" (per rsc). See also #4257.
+var reservedimports = []string{
+ "go",
+ "type",
+}
+
+func isbadimport(path string, allowSpace bool) bool {
+ if strings.Contains(path, "\x00") {
+ yyerror("import path contains NUL")
+ return true
+ }
+
+ for _, ri := range reservedimports {
+ if path == ri {
+ yyerror("import path %q is reserved and cannot be used", path)
+ return true
+ }
+ }
+
+ for _, r := range path {
+ if r == utf8.RuneError {
+ yyerror("import path contains invalid UTF-8 sequence: %q", path)
+ return true
+ }
+
+ if r < 0x20 || r == 0x7f {
+ yyerror("import path contains control character: %q", path)
+ return true
+ }
+
+ if r == '\\' {
+ yyerror("import path contains backslash; use slash: %q", path)
+ return true
+ }
+
+ if !allowSpace && unicode.IsSpace(r) {
+ yyerror("import path contains space character: %q", path)
+ return true
+ }
+
+ if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+ yyerror("import path contains invalid character '%c': %q", r, path)
+ return true
+ }
+ }
+
+ return false
+}
+
+// isdirectiface reports whether t can be stored directly in an
+// interface word, i.e., whether its representation is a single pointer.
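+//
+// For example: *int, chan int, map[string]int, func(), unsafe.Pointer,
+// [1]*byte, and struct{ p *byte } are direct; int, string, [2]*byte,
+// and struct{ a, b *byte } are not.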
+func isdirectiface(t *types.Type) bool {
+ if t.Broke() {
+ return false
+ }
+
+ switch t.Etype {
+ case TPTR:
+ // Pointers to notinheap types must be stored indirectly. See issue 42076.
+ return !t.Elem().NotInHeap()
+ case TCHAN,
+ TMAP,
+ TFUNC,
+ TUNSAFEPTR:
+ return true
+
+ case TARRAY:
+ // Array of 1 direct iface type can be direct.
+ return t.NumElem() == 1 && isdirectiface(t.Elem())
+
+ case TSTRUCT:
+ // Struct with 1 field of direct iface type can be direct.
+ return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
+ }
+
+ return false
+}
+
+// itabType loads the _type field from a runtime.itab struct.
+func itabType(itab *Node) *Node {
+ typ := nodSym(ODOTPTR, itab, nil)
+ typ.Type = types.NewPtr(types.Types[TUINT8])
+ typ.SetTypecheck(1)
+ typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
+ typ.SetBounded(true) // guaranteed not to fault
+ return typ
+}
+
+// ifaceData loads the data field from an interface.
+// The concrete type must be known to have type t.
+// It follows the pointer if !isdirectiface(t).
+func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node {
+ if t.IsInterface() {
+ Fatalf("ifaceData interface: %v", t)
+ }
+ ptr := nodlSym(pos, OIDATA, n, nil)
+ if isdirectiface(t) {
+ ptr.Type = t
+ ptr.SetTypecheck(1)
+ return ptr
+ }
+ ptr.Type = types.NewPtr(t)
+ ptr.SetTypecheck(1)
+ ind := nodl(pos, ODEREF, ptr, nil)
+ ind.Type = t
+ ind.SetTypecheck(1)
+ ind.SetBounded(true)
+ return ind
+}
+
+// typePos returns the position associated with t.
+// This is where t was declared or where it appeared as a type expression.
+func typePos(t *types.Type) src.XPos {
+ n := asNode(t.Nod)
+ if n == nil || !n.Pos.IsKnown() {
+ Fatalf("bad type: %v", t)
+ }
+ return n.Pos
+}
diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go
new file mode 100644
index 0000000..8d9fbe3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/swt.go
@@ -0,0 +1,756 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "sort"
+)
+
+// typecheckswitch typechecks a switch statement.
+func typecheckswitch(n *Node) {
+ typecheckslice(n.Ninit.Slice(), ctxStmt)
+ if n.Left != nil && n.Left.Op == OTYPESW {
+ typecheckTypeSwitch(n)
+ } else {
+ typecheckExprSwitch(n)
+ }
+}
+
+func typecheckTypeSwitch(n *Node) {
+ n.Left.Right = typecheck(n.Left.Right, ctxExpr)
+ t := n.Left.Right.Type
+ if t != nil && !t.IsInterface() {
+ yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
+ t = nil
+ }
+
+ // We don't actually declare the type switch's guarded
+ // declaration itself. So if there are no cases, we won't
+ // notice that it went unused.
+ if v := n.Left.Left; v != nil && !v.isBlank() && n.List.Len() == 0 {
+ yyerrorl(v.Pos, "%v declared but not used", v.Sym)
+ }
+
+ var defCase, nilCase *Node
+ var ts typeSet
+ for _, ncase := range n.List.Slice() {
+ ls := ncase.List.Slice()
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+ n1 := ls[i]
+ if t == nil || n1.Type == nil {
+ continue
+ }
+
+ var missing, have *types.Field
+ var ptr int
+ switch {
+ case n1.isNil(): // case nil:
+ if nilCase != nil {
+ yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", nilCase.Line())
+ } else {
+ nilCase = ncase
+ }
+ case n1.Op != OTYPE:
+ yyerrorl(ncase.Pos, "%L is not a type", n1)
+ case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
+ if have != nil && !have.Broke() {
+ yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
+ " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
+ } else {
+ yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
+ " (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
+ }
+ }
+
+ if n1.Op == OTYPE {
+ ts.add(ncase.Pos, n1.Type)
+ }
+ }
+
+ if ncase.Rlist.Len() != 0 {
+ // Assign the clause variable's type.
+ vt := t
+ if len(ls) == 1 {
+ if ls[0].Op == OTYPE {
+ vt = ls[0].Type
+ } else if ls[0].Op != OLITERAL { // TODO(mdempsky): Should be !ls[0].isNil()
+ // Invalid single-type case;
+ // mark variable as broken.
+ vt = nil
+ }
+ }
+
+ // TODO(mdempsky): It should be possible to
+ // still typecheck the case body.
+ if vt == nil {
+ continue
+ }
+
+ nvar := ncase.Rlist.First()
+ nvar.Type = vt
+ nvar = typecheck(nvar, ctxExpr|ctxAssign)
+ ncase.Rlist.SetFirst(nvar)
+ }
+
+ typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ }
+}
+
+type typeSet struct {
+ m map[string][]typeSetEntry
+}
+
+type typeSetEntry struct {
+ pos src.XPos
+ typ *types.Type
+}
+
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+ if s.m == nil {
+ s.m = make(map[string][]typeSetEntry)
+ }
+
+ // LongString does not uniquely identify types, so we need to
+ // disambiguate collisions with types.Identical.
+ // TODO(mdempsky): Add a method that *is* unique.
+ ls := typ.LongString()
+ prevs := s.m[ls]
+ for _, prev := range prevs {
+ if types.Identical(typ, prev.typ) {
+ yyerrorl(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, linestr(prev.pos))
+ return
+ }
+ }
+ s.m[ls] = append(prevs, typeSetEntry{pos, typ})
+}
+
+func typecheckExprSwitch(n *Node) {
+ t := types.Types[TBOOL]
+ if n.Left != nil {
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ t = n.Left.Type
+ }
+
+ var nilonly string
+ if t != nil {
+ switch {
+ case t.IsMap():
+ nilonly = "map"
+ case t.Etype == TFUNC:
+ nilonly = "func"
+ case t.IsSlice():
+ nilonly = "slice"
+
+ case !IsComparable(t):
+ if t.IsStruct() {
+ yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
+ } else {
+ yyerrorl(n.Pos, "cannot switch on %L", n.Left)
+ }
+ t = nil
+ }
+ }
+
+ var defCase *Node
+ var cs constSet
+ for _, ncase := range n.List.Slice() {
+ ls := ncase.List.Slice()
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", defCase.Line())
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ setlineno(ncase)
+ ls[i] = typecheck(ls[i], ctxExpr)
+ ls[i] = defaultlit(ls[i], t)
+ n1 := ls[i]
+ if t == nil || n1.Type == nil {
+ continue
+ }
+
+ if nilonly != "" && !n1.isNil() {
+ yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
+ } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
+ yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
+ } else {
+ op1, _ := assignop(n1.Type, t)
+ op2, _ := assignop(t, n1.Type)
+ if op1 == OXXX && op2 == OXXX {
+ if n.Left != nil {
+ yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ } else {
+ yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ }
+ }
+ }
+
+ // Don't check for duplicate bools. Although the spec allows it,
+ // (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
+ // (2) it would disallow useful things like
+ // case GOARCH == "arm" && GOARM == "5":
+ // case GOARCH == "arm":
+ // which would both evaluate to false for non-ARM compiles.
+ if !n1.Type.IsBoolean() {
+ cs.add(ncase.Pos, n1, "case", "switch")
+ }
+ }
+
+ typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ }
+}
+
+// walkswitch walks a switch statement.
+func walkswitch(sw *Node) {
+ // Guard against double walk, see #25776.
+ if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
+ return // Was fatal, but eliminating every possible source of double-walking is hard
+ }
+
+ if sw.Left != nil && sw.Left.Op == OTYPESW {
+ walkTypeSwitch(sw)
+ } else {
+ walkExprSwitch(sw)
+ }
+}
+
+// walkExprSwitch generates an AST implementing sw. sw is an
+// expression switch.
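+//
+// Schematically (not verbatim), a switch such as
+//
+//	switch x {
+//	case 1:
+//		A
+//	case 2, 3:
+//		B
+//	default:
+//		C
+//	}
+//
+// becomes a dispatch section that jumps to per-case labels, followed
+// by the labeled bodies, each ending in an implicit break:
+//
+//	if x == 1 { goto s1 }
+//	if 2 <= x && x <= 3 { goto s2 }
+//	goto sd
+//	s1: A; break
+//	s2: B; break
+//	sd: C; break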
+func walkExprSwitch(sw *Node) {
+ lno := setlineno(sw)
+
+ cond := sw.Left
+ sw.Left = nil
+
+ // convert switch {...} to switch true {...}
+ if cond == nil {
+ cond = nodbool(true)
+ cond = typecheck(cond, ctxExpr)
+ cond = defaultlit(cond, nil)
+ }
+
+ // Given "switch string(byteslice)",
+ // with all cases being side-effect free,
+ // use a zero-cost alias of the byte slice.
+ // Do this before calling walkexpr on cond,
+ // because walkexpr will lower the string
+ // conversion into a runtime call.
+ // See issue 24937 for more discussion.
+ if cond.Op == OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond.Op = OBYTES2STRTMP
+ }
+
+ cond = walkexpr(cond, &sw.Ninit)
+ if cond.Op != OLITERAL {
+ cond = copyexpr(cond, cond.Type, &sw.Nbody)
+ }
+
+ lineno = lno
+
+ s := exprSwitch{
+ exprname: cond,
+ }
+
+ var defaultGoto *Node
+ var body Nodes
+ for _, ncase := range sw.List.Slice() {
+ label := autolabel(".s")
+ jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+
+ // Process case dispatch.
+ if ncase.List.Len() == 0 {
+ if defaultGoto != nil {
+ Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+
+ for _, n1 := range ncase.List.Slice() {
+ s.Add(ncase.Pos, n1, jmp)
+ }
+
+ // Process body.
+ body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
+ body.Append(ncase.Nbody.Slice()...)
+ if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
+ br := nod(OBREAK, nil, nil)
+ br.Pos = pos
+ body.Append(br)
+ }
+ }
+ sw.List.Set(nil)
+
+ if defaultGoto == nil {
+ br := nod(OBREAK, nil, nil)
+ br.Pos = br.Pos.WithNotStmt()
+ defaultGoto = br
+ }
+
+ s.Emit(&sw.Nbody)
+ sw.Nbody.Append(defaultGoto)
+ sw.Nbody.AppendNodes(&body)
+ walkstmtlist(sw.Nbody.Slice())
+}
+
+// An exprSwitch walks an expression switch.
+type exprSwitch struct {
+ exprname *Node // value being switched on
+
+ done Nodes
+ clauses []exprClause
+}
+
+type exprClause struct {
+ pos src.XPos
+ lo, hi *Node
+ jmp *Node
+}
+
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp *Node) {
+ c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
+ if okforcmp[s.exprname.Type.Etype] && expr.Op == OLITERAL {
+ s.clauses = append(s.clauses, c)
+ return
+ }
+
+ s.flush()
+ s.clauses = append(s.clauses, c)
+ s.flush()
+}
+
+func (s *exprSwitch) Emit(out *Nodes) {
+ s.flush()
+ out.AppendNodes(&s.done)
+}
+
+func (s *exprSwitch) flush() {
+ cc := s.clauses
+ s.clauses = nil
+ if len(cc) == 0 {
+ return
+ }
+
+	// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
+ // The code below is structured to implicitly handle this case
+ // (e.g., sort.Slice doesn't need to invoke the less function
+ // when there's only a single slice element).
+
+ if s.exprname.Type.IsString() && len(cc) >= 2 {
+ // Sort strings by length and then by value. It is
+ // much cheaper to compare lengths than values, and
+ // all we need here is consistency. We respect this
+ // sorting below.
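+		// For example (schematic), the cases "go", "hi", "run", "walk"
+		// sort into length runs {"go", "hi"}, {"run"}, {"walk"}; the
+		// outer search below tests len(s), and the inner search
+		// compares values within a run.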
+ sort.Slice(cc, func(i, j int) bool {
+ si := cc[i].lo.StringVal()
+ sj := cc[j].lo.StringVal()
+ if len(si) != len(sj) {
+ return len(si) < len(sj)
+ }
+ return si < sj
+ })
+
+ // runLen returns the string length associated with a
+ // particular run of exprClauses.
+ runLen := func(run []exprClause) int64 { return int64(len(run[0].lo.StringVal())) }
+
+ // Collapse runs of consecutive strings with the same length.
+ var runs [][]exprClause
+ start := 0
+ for i := 1; i < len(cc); i++ {
+ if runLen(cc[start:]) != runLen(cc[i:]) {
+ runs = append(runs, cc[start:i])
+ start = i
+ }
+ }
+ runs = append(runs, cc[start:])
+
+ // Perform two-level binary search.
+ nlen := nod(OLEN, s.exprname, nil)
+ binarySearch(len(runs), &s.done,
+ func(i int) *Node {
+ return nod(OLE, nlen, nodintconst(runLen(runs[i-1])))
+ },
+ func(i int, nif *Node) {
+ run := runs[i]
+ nif.Left = nod(OEQ, nlen, nodintconst(runLen(run)))
+ s.search(run, &nif.Nbody)
+ },
+ )
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool {
+ return compareOp(cc[i].lo.Val(), OLT, cc[j].lo.Val())
+ })
+
+ // Merge consecutive integer cases.
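+	// For example, the values of "case 1, 2, 3:" share a jump target and
+	// collapse into a single clause later tested as 1 <= x && x <= 3.
+	// Values from distinct case clauses are not merged.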
+ if s.exprname.Type.IsInteger() {
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.jmp == c.jmp && last.hi.Int64Val()+1 == c.lo.Int64Val() {
+ last.hi = c.lo
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+ }
+
+ s.search(cc, &s.done)
+}
+
+func (s *exprSwitch) search(cc []exprClause, out *Nodes) {
+ binarySearch(len(cc), out,
+ func(i int) *Node {
+ return nod(OLE, s.exprname, cc[i-1].hi)
+ },
+ func(i int, nif *Node) {
+ c := &cc[i]
+ nif.Left = c.test(s.exprname)
+ nif.Nbody.Set1(c.jmp)
+ },
+ )
+}
+
+func (c *exprClause) test(exprname *Node) *Node {
+ // Integer range.
+ if c.hi != c.lo {
+ low := nodl(c.pos, OGE, exprname, c.lo)
+ high := nodl(c.pos, OLE, exprname, c.hi)
+ return nodl(c.pos, OANDAND, low, high)
+ }
+
+	// Optimize "switch true { ... }" and "switch false { ... }".
+ if Isconst(exprname, CTBOOL) && !c.lo.Type.IsInterface() {
+ if exprname.BoolVal() {
+ return c.lo
+ } else {
+ return nodl(c.pos, ONOT, c.lo, nil)
+ }
+ }
+
+ return nodl(c.pos, OEQ, exprname, c.lo)
+}
+
+func allCaseExprsAreSideEffectFree(sw *Node) bool {
+ // In theory, we could be more aggressive, allowing any
+ // side-effect-free expressions in cases, but it's a bit
+ // tricky because some of that information is unavailable due
+ // to the introduction of temporaries during order.
+ // Restricting to constants is simple and probably powerful
+ // enough.
+
+ for _, ncase := range sw.List.Slice() {
+ if ncase.Op != OCASE {
+ Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
+ }
+ for _, v := range ncase.List.Slice() {
+ if v.Op != OLITERAL {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// hasFall reports whether stmts ends with a "fallthrough" statement.
+func hasFall(stmts []*Node) (bool, src.XPos) {
+ // Search backwards for the index of the fallthrough
+ // statement. Do not assume it'll be in the last
+ // position, since in some cases (e.g. when the statement
+ // list contains autotmp_ variables), one or more OVARKILL
+ // nodes will be at the end of the list.
+
+ i := len(stmts) - 1
+ for i >= 0 && stmts[i].Op == OVARKILL {
+ i--
+ }
+ if i < 0 {
+ return false, src.NoXPos
+ }
+ return stmts[i].Op == OFALL, stmts[i].Pos
+}
+
+// walkTypeSwitch generates an AST that implements sw, where sw is a
+// type switch.
+func walkTypeSwitch(sw *Node) {
+ var s typeSwitch
+ s.facename = sw.Left.Right
+ sw.Left = nil
+
+ s.facename = walkexpr(s.facename, &sw.Ninit)
+ s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
+ s.okname = temp(types.Types[TBOOL])
+
+ // Get interface descriptor word.
+ // For empty interfaces this will be the type.
+ // For non-empty interfaces this will be the itab.
+ itab := nod(OITAB, s.facename, nil)
+
+ // For empty interfaces, do:
+ // if e._type == nil {
+ // do nil case if it exists, otherwise default
+ // }
+ // h := e._type.hash
+ // Use a similar strategy for non-empty interfaces.
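+	// The hash h is then used to binary-search the case types (see
+	// typeSwitch.flush below); each selected candidate still performs
+	// a full type assertion, so a hash collision only costs an extra test.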
+ ifNil := nod(OIF, nil, nil)
+ ifNil.Left = nod(OEQ, itab, nodnil())
+ lineno = lineno.WithNotStmt() // disable statement marks after the first check.
+ ifNil.Left = typecheck(ifNil.Left, ctxExpr)
+ ifNil.Left = defaultlit(ifNil.Left, nil)
+ // ifNil.Nbody assigned at end.
+ sw.Nbody.Append(ifNil)
+
+ // Load hash from type or itab.
+ dotHash := nodSym(ODOTPTR, itab, nil)
+ dotHash.Type = types.Types[TUINT32]
+ dotHash.SetTypecheck(1)
+ if s.facename.Type.IsEmptyInterface() {
+ dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
+ } else {
+ dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
+ }
+ dotHash.SetBounded(true) // guaranteed not to fault
+ s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
+
+ br := nod(OBREAK, nil, nil)
+ var defaultGoto, nilGoto *Node
+ var body Nodes
+ for _, ncase := range sw.List.Slice() {
+ var caseVar *Node
+ if ncase.Rlist.Len() != 0 {
+ caseVar = ncase.Rlist.First()
+ }
+
+ // For single-type cases with an interface type,
+ // we initialize the case variable as part of the type assertion.
+ // In other cases, we initialize it in the body.
+ var singleType *types.Type
+ if ncase.List.Len() == 1 && ncase.List.First().Op == OTYPE {
+ singleType = ncase.List.First().Type
+ }
+ caseVarInitialized := false
+
+ label := autolabel(".s")
+ jmp := npos(ncase.Pos, nodSym(OGOTO, nil, label))
+
+ if ncase.List.Len() == 0 { // default:
+ if defaultGoto != nil {
+ Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+
+ for _, n1 := range ncase.List.Slice() {
+ if n1.isNil() { // case nil:
+ if nilGoto != nil {
+ Fatalf("duplicate nil case not detected during typechecking")
+ }
+ nilGoto = jmp
+ continue
+ }
+
+ if singleType != nil && singleType.IsInterface() {
+ s.Add(ncase.Pos, n1.Type, caseVar, jmp)
+ caseVarInitialized = true
+ } else {
+ s.Add(ncase.Pos, n1.Type, nil, jmp)
+ }
+ }
+
+ body.Append(npos(ncase.Pos, nodSym(OLABEL, nil, label)))
+ if caseVar != nil && !caseVarInitialized {
+ val := s.facename
+ if singleType != nil {
+ // We have a single concrete type. Extract the data.
+ if singleType.IsInterface() {
+ Fatalf("singleType interface should have been handled in Add")
+ }
+ val = ifaceData(ncase.Pos, s.facename, singleType)
+ }
+ l := []*Node{
+ nodl(ncase.Pos, ODCL, caseVar, nil),
+ nodl(ncase.Pos, OAS, caseVar, val),
+ }
+ typecheckslice(l, ctxStmt)
+ body.Append(l...)
+ }
+ body.Append(ncase.Nbody.Slice()...)
+ body.Append(br)
+ }
+ sw.List.Set(nil)
+
+ if defaultGoto == nil {
+ defaultGoto = br
+ }
+ if nilGoto == nil {
+ nilGoto = defaultGoto
+ }
+ ifNil.Nbody.Set1(nilGoto)
+
+ s.Emit(&sw.Nbody)
+ sw.Nbody.Append(defaultGoto)
+ sw.Nbody.AppendNodes(&body)
+
+ walkstmtlist(sw.Nbody.Slice())
+}
+
+// A typeSwitch walks a type switch.
+type typeSwitch struct {
+ // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
+ facename *Node // value being type-switched on
+ hashname *Node // type hash of the value being type-switched on
+ okname *Node // boolean used for comma-ok type assertions
+
+ done Nodes
+ clauses []typeClause
+}
+
+type typeClause struct {
+ hash uint32
+ body Nodes
+}
+
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *Node) {
+ var body Nodes
+ if caseVar != nil {
+ l := []*Node{
+ nodl(pos, ODCL, caseVar, nil),
+ nodl(pos, OAS, caseVar, nil),
+ }
+ typecheckslice(l, ctxStmt)
+ body.Append(l...)
+ } else {
+ caseVar = nblank
+ }
+
+ // cv, ok = iface.(type)
+ as := nodl(pos, OAS2, nil, nil)
+ as.List.Set2(caseVar, s.okname) // cv, ok =
+ dot := nodl(pos, ODOTTYPE, s.facename, nil)
+ dot.Type = typ // iface.(type)
+ as.Rlist.Set1(dot)
+ as = typecheck(as, ctxStmt)
+ as = walkexpr(as, &body)
+ body.Append(as)
+
+ // if ok { goto label }
+ nif := nodl(pos, OIF, nil, nil)
+ nif.Left = s.okname
+ nif.Nbody.Set1(jmp)
+ body.Append(nif)
+
+ if !typ.IsInterface() {
+ s.clauses = append(s.clauses, typeClause{
+ hash: typehash(typ),
+ body: body,
+ })
+ return
+ }
+
+ s.flush()
+ s.done.AppendNodes(&body)
+}
+
+func (s *typeSwitch) Emit(out *Nodes) {
+ s.flush()
+ out.AppendNodes(&s.done)
+}
+
+func (s *typeSwitch) flush() {
+ cc := s.clauses
+ s.clauses = nil
+ if len(cc) == 0 {
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
+
+ // Combine adjacent cases with the same hash.
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.hash == c.hash {
+ last.body.AppendNodes(&c.body)
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+
+ binarySearch(len(cc), &s.done,
+ func(i int) *Node {
+ return nod(OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
+ },
+ func(i int, nif *Node) {
+ // TODO(mdempsky): Omit hash equality check if
+ // there's only one type.
+ c := cc[i]
+ nif.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
+ nif.Nbody.AppendNodes(&c.body)
+ },
+ )
+}
+
+// binarySearch constructs a binary search tree for handling n cases,
+// and appends it to out. It's used for efficiently implementing
+// switch statements.
+//
+// less(i) should return a boolean expression. If it evaluates true,
+// then cases before i will be tested; otherwise, cases i and later.
+//
+// base(i, nif) should set up nif (an OIF node) to test case i. In
+// particular, it should set nif.Left and nif.Nbody.
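+//
+// Schematically, the cases are split recursively around a midpoint
+// using less until fewer than binarySearchMin remain, at which point
+// they are emitted as a chain of if/else-if tests built by base.
+// For n = 6, the generated code has the shape
+//
+//	if less(3) {
+//		// cases 0, 1, 2 as an if/else-if chain (via base)
+//	} else {
+//		// cases 3, 4, 5 as an if/else-if chain (via base)
+//	}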
+func binarySearch(n int, out *Nodes, less func(i int) *Node, base func(i int, nif *Node)) {
+ const binarySearchMin = 4 // minimum number of cases for binary search
+
+ var do func(lo, hi int, out *Nodes)
+ do = func(lo, hi int, out *Nodes) {
+ n := hi - lo
+ if n < binarySearchMin {
+ for i := lo; i < hi; i++ {
+ nif := nod(OIF, nil, nil)
+ base(i, nif)
+ lineno = lineno.WithNotStmt()
+ nif.Left = typecheck(nif.Left, ctxExpr)
+ nif.Left = defaultlit(nif.Left, nil)
+ out.Append(nif)
+ out = &nif.Rlist
+ }
+ return
+ }
+
+ half := lo + n/2
+ nif := nod(OIF, nil, nil)
+ nif.Left = less(half)
+ lineno = lineno.WithNotStmt()
+ nif.Left = typecheck(nif.Left, ctxExpr)
+ nif.Left = defaultlit(nif.Left, nil)
+ do(lo, half, &nif.Nbody)
+ do(half, hi, &nif.Rlist)
+ out.Append(nif)
+ }
+
+ do(0, n, out)
+}
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
new file mode 100644
index 0000000..7b4a315
--- /dev/null
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -0,0 +1,1196 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package gc
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "sort"
+)
+
+// A Node is a single node in the syntax tree.
+// Actually the syntax tree is a syntax DAG, because there is only one
+// node with Op=ONAME for a given instance of a variable x.
+// The same is true for Op=OTYPE and Op=OLITERAL. See Node.mayBeShared.
+type Node struct {
+ // Tree structure.
+ // Generic recursive walks should follow these fields.
+ Left *Node
+ Right *Node
+ Ninit Nodes
+ Nbody Nodes
+ List Nodes
+ Rlist Nodes
+
+ // most nodes
+ Type *types.Type
+ Orig *Node // original form, for printing, and tracking copies of ONAMEs
+
+ // func
+ Func *Func
+
+ // ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
+ Name *Name
+
+ Sym *types.Sym // various
+ E interface{} // Opt or Val, see methods below
+
+ // Various. Usually an offset into a struct. For example:
+ // - ONAME nodes that refer to local variables use it to identify their stack frame position.
+ // - ODOT, ODOTPTR, and ORESULT use it to indicate offset relative to their base address.
+ // - OSTRUCTKEY uses it to store the named field's offset.
+ // - Named OLITERALs use it to store their ambient iota value.
+ // - OINLMARK stores an index into the inlTree data structure.
+ // - OCLOSURE uses it to store ambient iota value, if any.
+ // Possibly still more uses. If you find any, document them.
+ Xoffset int64
+
+ Pos src.XPos
+
+ flags bitset32
+
+ Esc uint16 // EscXXX
+
+ Op Op
+ aux uint8
+}
+
+func (n *Node) ResetAux() {
+ n.aux = 0
+}
+
+func (n *Node) SubOp() Op {
+ switch n.Op {
+ case OASOP, ONAME:
+ default:
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ return Op(n.aux)
+}
+
+func (n *Node) SetSubOp(op Op) {
+ switch n.Op {
+ case OASOP, ONAME:
+ default:
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ n.aux = uint8(op)
+}
+
+func (n *Node) IndexMapLValue() bool {
+ if n.Op != OINDEXMAP {
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ return n.aux != 0
+}
+
+func (n *Node) SetIndexMapLValue(b bool) {
+ if n.Op != OINDEXMAP {
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ if b {
+ n.aux = 1
+ } else {
+ n.aux = 0
+ }
+}
+
+func (n *Node) TChanDir() types.ChanDir {
+ if n.Op != OTCHAN {
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ return types.ChanDir(n.aux)
+}
+
+func (n *Node) SetTChanDir(dir types.ChanDir) {
+ if n.Op != OTCHAN {
+ Fatalf("unexpected op: %v", n.Op)
+ }
+ n.aux = uint8(dir)
+}
+
+func (n *Node) IsSynthetic() bool {
+ name := n.Sym.Name
+ return name[0] == '.' || name[0] == '~'
+}
+
+// IsAutoTmp indicates if n was created by the compiler as a temporary,
+// based on the setting of the .AutoTemp flag in n's Name.
+func (n *Node) IsAutoTmp() bool {
+ if n == nil || n.Op != ONAME {
+ return false
+ }
+ return n.Name.AutoTemp()
+}
+
+const (
+ nodeClass, _ = iota, 1 << iota // PPARAM, PAUTO, PEXTERN, etc; three bits; first in the list because frequently accessed
+ _, _ // second nodeClass bit
+ _, _ // third nodeClass bit
+ nodeWalkdef, _ // tracks state during typecheckdef; 2 == loop detected; two bits
+ _, _ // second nodeWalkdef bit
+ nodeTypecheck, _ // tracks state during typechecking; 2 == loop detected; two bits
+ _, _ // second nodeTypecheck bit
+ nodeInitorder, _ // tracks state during init1; two bits
+ _, _ // second nodeInitorder bit
+ _, nodeHasBreak
+ _, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
+ _, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP
+ _, nodeIsDDD // is the argument variadic
+ _, nodeDiag // already printed error about this
+ _, nodeColas // OAS resulting from :=
+ _, nodeNonNil // guaranteed to be non-nil
+ _, nodeTransient // storage can be reused immediately after this statement
+ _, nodeBounded // bounds check unnecessary
+ _, nodeHasCall // expression contains a function call
+ _, nodeLikely // if statement condition likely
+ _, nodeHasVal // node.E contains a Val
+ _, nodeHasOpt // node.E contains an Opt
+ _, nodeEmbedded // ODCLFIELD embedded type
+)
+
+func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
+func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
+func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
+func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
+
+func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
+func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
+func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
+func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
+func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
+func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
+func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
+func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
+func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
+func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
+func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
+func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
+func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
+func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
+
+func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
+func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
+func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
+func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
+
+func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
+func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
+func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
+func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
+func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
+func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
+func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
+func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
+func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
+func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) }
+func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
+func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
+
+// MarkNonNil marks a pointer n as being guaranteed non-nil,
+// on all code paths, at all times.
+// During conversion to SSA, non-nil pointers won't have nil checks
+// inserted before dereferencing. See state.exprPtr.
+func (n *Node) MarkNonNil() {
+ if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() {
+ Fatalf("MarkNonNil(%v), type %v", n, n.Type)
+ }
+ n.flags.set(nodeNonNil, true)
+}
+
+// SetBounded indicates whether operation n does not need safety checks.
+// When n is an index or slice operation, n does not need bounds checks.
+// When n is a dereferencing operation, n does not need nil checks.
+// When n is a makeslice+copy operation, n does not need length and cap checks.
+func (n *Node) SetBounded(b bool) {
+ switch n.Op {
+ case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
+ // No bounds checks needed.
+ case ODOTPTR, ODEREF:
+ // No nil check needed.
+ case OMAKESLICECOPY:
+ // No length and cap checks needed
+ // since new slice and copied over slice data have same length.
+ default:
+ Fatalf("SetBounded(%v)", n)
+ }
+ n.flags.set(nodeBounded, b)
+}
+
+// MarkReadonly indicates that n is an ONAME with readonly contents.
+func (n *Node) MarkReadonly() {
+ if n.Op != ONAME {
+ Fatalf("Node.MarkReadonly %v", n.Op)
+ }
+ n.Name.SetReadonly(true)
+ // Mark the linksym as readonly immediately
+ // so that the SSA backend can use this information.
+ // It will be overridden later during dumpglobls.
+ n.Sym.Linksym().Type = objabi.SRODATA
+}
+
+// Val returns the Val for the node.
+func (n *Node) Val() Val {
+ if !n.HasVal() {
+ return Val{}
+ }
+ return Val{n.E}
+}
+
+// SetVal sets the Val for the node, which must not have been used with SetOpt.
+func (n *Node) SetVal(v Val) {
+ if n.HasOpt() {
+ Debug.h = 1
+ Dump("have Opt", n)
+ Fatalf("have Opt")
+ }
+ n.SetHasVal(true)
+ n.E = v.U
+}
+
+// Opt returns the optimizer data for the node.
+func (n *Node) Opt() interface{} {
+ if !n.HasOpt() {
+ return nil
+ }
+ return n.E
+}
+
+// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
+// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
+func (n *Node) SetOpt(x interface{}) {
+ if x == nil && n.HasVal() {
+ return
+ }
+ if n.HasVal() {
+ Debug.h = 1
+ Dump("have Val", n)
+ Fatalf("have Val")
+ }
+ n.SetHasOpt(true)
+ n.E = x
+}
+
+func (n *Node) Iota() int64 {
+ return n.Xoffset
+}
+
+func (n *Node) SetIota(x int64) {
+ n.Xoffset = x
+}
+
+// mayBeShared reports whether n may occur in multiple places in the AST.
+// Extra care must be taken when mutating such a node.
+func (n *Node) mayBeShared() bool {
+ switch n.Op {
+ case ONAME, OLITERAL, OTYPE:
+ return true
+ }
+ return false
+}
+
+// isMethodExpression reports whether n represents a method expression T.M.
+func (n *Node) isMethodExpression() bool {
+ return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME
+}
+
+// funcname returns the name (without the package) of the function n.
+func (n *Node) funcname() string {
+ if n == nil || n.Func == nil || n.Func.Nname == nil {
+ return "<nil>"
+ }
+ return n.Func.Nname.Sym.Name
+}
+
+// pkgFuncName returns the name of the function referenced by n, with package prepended.
+// This differs from the compiler's internal convention where local functions lack a package
+// because the ultimate consumer of this is a human looking at an IDE; package is only empty
+// if the compilation package is actually the empty string.
+func (n *Node) pkgFuncName() string {
+ var s *types.Sym
+ if n == nil {
+ return "<nil>"
+ }
+ if n.Op == ONAME {
+ s = n.Sym
+ } else {
+ if n.Func == nil || n.Func.Nname == nil {
+ return "<nil>"
+ }
+ s = n.Func.Nname.Sym
+ }
+ pkg := s.Pkg
+
+ p := myimportpath
+ if pkg != nil && pkg.Path != "" {
+ p = pkg.Path
+ }
+ if p == "" {
+ return s.Name
+ }
+ return p + "." + s.Name
+}
+
+// The compiler needs *Node to be assignable to cmd/compile/internal/ssa.Sym.
+func (n *Node) CanBeAnSSASym() {
+}
+
+// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
+type Name struct {
+ Pack *Node // real package for import . names
+ Pkg *types.Pkg // pkg for OPACK nodes
+ // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
+ // For a closure var, the ONAME node of the outer captured variable
+ Defn *Node
+ // The ODCLFUNC node (for a static function/method or a closure) in which
+ // local variable or param is declared.
+ Curfn *Node
+ Param *Param // additional fields for ONAME, OTYPE
+ Decldepth int32 // declaration loop depth, increased for every loop or label
+ // Unique number for ONAME nodes within a function. Function outputs
+ // (results) are numbered starting at one, followed by function inputs
+ // (parameters), and then local variables. Vargen is used to distinguish
+ // local variables/params with the same name.
+ Vargen int32
+ flags bitset16
+}
+
+const (
+ nameCaptured = 1 << iota // is the variable captured by a closure
+ nameReadonly
+ nameByval // is the variable captured by value or by reference
+ nameNeedzero // if it contains pointers, needs to be zeroed on function entry
+ nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
+ nameUsed // for variable declared and not used error
+ nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
+ nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
+ nameAssigned // is the variable ever assigned to
+ nameAddrtaken // address taken, even if not moved to heap
+ nameInlFormal // PAUTO created by inliner, derived from callee formal
+ nameInlLocal // PAUTO created by inliner, derived from callee local
+ nameOpenDeferSlot // if temporary var storing info for open-coded defers
+ nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
+)
+
+func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
+func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
+func (n *Name) Byval() bool { return n.flags&nameByval != 0 }
+func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
+func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
+func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
+func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
+func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
+func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 }
+func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
+func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
+func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
+func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
+func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
+
+func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
+func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) }
+func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) }
+func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
+func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
+func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
+func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
+func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
+func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) }
+func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
+func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
+func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
+func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
+func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
+
+type Param struct {
+ Ntype *Node
+ Heapaddr *Node // temp holding heap address of param
+
+ // ONAME PAUTOHEAP
+ Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
+
+ // ONAME closure linkage
+ // Consider:
+ //
+ // func f() {
+ // x := 1 // x1
+ // func() {
+ // use(x) // x2
+ // func() {
+ // use(x) // x3
+ // --- parser is here ---
+ // }()
+ // }()
+ // }
+ //
+ // There is an original declaration of x and then a chain of mentions of x
+ // leading into the current function. Each time x is mentioned in a new closure,
+ // we create a variable representing x for use in that specific closure,
+ // since the way you get to x is different in each closure.
+ //
+ // Let's number the specific variables as shown in the code:
+ // x1 is the original x, x2 is when mentioned in the closure,
+ // and x3 is when mentioned in the closure in the closure.
+ //
+ // We keep these linked (assume N > 1):
+ //
+ // - x1.Defn = original declaration statement for x (like most variables)
+ // - x1.Innermost = current innermost closure x (in this case x3), or nil for none
+ // - x1.IsClosureVar() = false
+ //
+ // - xN.Defn = x1, N > 1
+ // - xN.IsClosureVar() = true, N > 1
+ // - x2.Outer = nil
+ // - xN.Outer = x(N-1), N > 2
+ //
+ //
+ // When we look up x in the symbol table, we always get x1.
+ // Then we can use x1.Innermost (if not nil) to get the x
+ // for the innermost known closure function,
+ // but the first reference in a closure will find either no x1.Innermost
+ // or an x1.Innermost with .Funcdepth < Funcdepth.
+ // In that case, a new xN must be created, linked in with:
+ //
+ // xN.Defn = x1
+ // xN.Outer = x1.Innermost
+ // x1.Innermost = xN
+ //
+ // When we finish the function, we'll process its closure variables
+ // and find xN and pop it off the list using:
+ //
+ // x1 := xN.Defn
+ // x1.Innermost = xN.Outer
+ //
+ // We leave x1.Innermost set so that we can still get to the original
+ // variable quickly. Not shown here, but once we're
+ // done parsing a function and no longer need xN.Outer for the
+ // lexical x reference links as described above, funcLit
+ // recomputes xN.Outer as the semantic x reference link tree,
+ // even filling in x in intermediate closures that might not
+ // have mentioned it along the way to inner closures that did.
+ // See funcLit for details.
+ //
+ // During the eventual compilation, then, for closure variables we have:
+ //
+ // xN.Defn = original variable
+ // xN.Outer = variable captured in next outward scope
+ // to make closure where xN appears
+ //
+ // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
+ // and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
+ Innermost *Node
+ Outer *Node
+
+ // OTYPE & ONAME //go:embed info,
+ // sharing storage to reduce gc.Param size.
+	// Extra is nil, or else *Extra is a *paramType or an *embedList.
+ Extra *interface{}
+}
+
+type paramType struct {
+ flag PragmaFlag
+ alias bool
+}
+
+type irEmbed struct {
+ Pos src.XPos
+ Patterns []string
+}
+
+type embedList []irEmbed
+
+// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) Pragma() PragmaFlag {
+ if p.Extra == nil {
+ return 0
+ }
+ return (*p.Extra).(*paramType).flag
+}
+
+// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
+func (p *Param) SetPragma(flag PragmaFlag) {
+ if p.Extra == nil {
+ if flag == 0 {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{flag: flag}
+ return
+ }
+ (*p.Extra).(*paramType).flag = flag
+}
+
+// Alias reports whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) Alias() bool {
+ if p.Extra == nil {
+ return false
+ }
+ t, ok := (*p.Extra).(*paramType)
+ if !ok {
+ return false
+ }
+ return t.alias
+}
+
+// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
+func (p *Param) SetAlias(alias bool) {
+ if p.Extra == nil {
+ if !alias {
+ return
+ }
+ p.Extra = new(interface{})
+ *p.Extra = &paramType{alias: alias}
+ return
+ }
+ (*p.Extra).(*paramType).alias = alias
+}
+
+// EmbedList returns the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) EmbedList() []irEmbed {
+ if p.Extra == nil {
+ return nil
+ }
+ return *(*p.Extra).(*embedList)
+}
+
+// SetEmbedList sets the list of embedded files for p,
+// which must be for an ONAME var.
+func (p *Param) SetEmbedList(list []irEmbed) {
+ if p.Extra == nil {
+ if len(list) == 0 {
+ return
+ }
+ f := embedList(list)
+ p.Extra = new(interface{})
+ *p.Extra = &f
+ return
+ }
+ *(*p.Extra).(*embedList) = list
+}
+
+// Functions
+//
+// A simple function declaration is represented as an ODCLFUNC node f
+// and an ONAME node n. They're linked to one another through
+// f.Func.Nname == n and n.Name.Defn == f. When functions are
+// referenced by name in an expression, the function's ONAME node is
+// used directly.
+//
+// Function names have n.Class() == PFUNC. This distinguishes them
+// from variables of function type.
+//
+// Confusingly, n.Func and f.Func both exist, but commonly point to
+// different Funcs. (Exception: an OCALLPART's Func does point to its
+// ODCLFUNC's Func.)
+//
+// A method declaration is represented like functions, except n.Sym
+// will be the qualified method name (e.g., "T.m") and
+// f.Func.Shortname is the bare method name (e.g., "m").
+//
+// Method expressions are represented as ONAME/PFUNC nodes like
+// function names, but their Left and Right fields still point to the
+// type and method, respectively. They can be distinguished from
+// normal functions with isMethodExpression. Also, unlike function
+// name nodes, method expression nodes exist for each method
+// expression. The declaration ONAME can be accessed with
+// x.Type.Nname(), where x is the method expression ONAME node.
+//
+// Method values are represented by ODOTMETH/ODOTINTER when called
+// immediately, and OCALLPART otherwise. They are like method
+// expressions, except that for ODOTMETH/ODOTINTER the method name is
+// stored in Sym instead of Right.
+//
+// Closures are represented by OCLOSURE node c. They link back and
+// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure
+// == f and f.Func.Closure == c.
+//
+// Function bodies are stored in f.Nbody, and inline function bodies
+// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma.
+//
+// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They
+// also use Dcl instead of Inldcl.
+
+// Func holds Node fields used only with function-like nodes.
+type Func struct {
+ Shortname *types.Sym
+ // Extra entry code for the function. For example, allocate and initialize
+ // memory for escaping parameters. However, just for OCLOSURE, Enter is a
+ // list of ONAME nodes of captured variables
+ Enter Nodes
+ Exit Nodes
+ // ONAME nodes for closure params, each should have closurevar set
+ Cvars Nodes
+ // ONAME nodes for all params/locals for this func/closure, does NOT
+ // include closurevars until transformclosure runs.
+ Dcl []*Node
+
+ // Parents records the parent scope of each scope within a
+ // function. The root scope (0) has no parent, so the i'th
+ // scope's parent is stored at Parents[i-1].
+ Parents []ScopeID
+
+ // Marks records scope boundary changes.
+ Marks []Mark
+
+ // Closgen tracks how many closures have been generated within
+ // this function. Used by closurename for creating unique
+ // function names.
+ Closgen int
+
+ FieldTrack map[*types.Sym]struct{}
+ DebugInfo *ssa.FuncDebug
+ Ntype *Node // signature
+ Top int // top context (ctxCallee, etc)
+ Closure *Node // OCLOSURE <-> ODCLFUNC (see header comment above)
+ Nname *Node // The ONAME node associated with an ODCLFUNC (both have same Type)
+ lsym *obj.LSym
+
+ Inl *Inline
+
+ Label int32 // largest auto-generated label in this function
+
+ Endlineno src.XPos
+ WBPos src.XPos // position of first write barrier; see SetWBPos
+
+ Pragma PragmaFlag // go:xxx function annotations
+
+ flags bitset16
+ numDefers int // number of defer calls in the function
+ numReturns int // number of explicit returns in the function
+
+ // nwbrCalls records the LSyms of functions called by this
+ // function for go:nowritebarrierrec analysis. Only filled in
+ // if nowritebarrierrecCheck != nil.
+ nwbrCalls *[]nowritebarrierrecCallSym
+}
+
+// An Inline holds fields used for function bodies that can be inlined.
+type Inline struct {
+ Cost int32 // heuristic cost of inlining this function
+
+ // Copies of Func.Dcl and Nbody for use during inlining.
+ Dcl []*Node
+ Body []*Node
+}
+
+// A Mark represents a scope boundary.
+type Mark struct {
+ // Pos is the position of the token that marks the scope
+ // change.
+ Pos src.XPos
+
+ // Scope identifies the innermost scope to the right of Pos.
+ Scope ScopeID
+}
+
+// A ScopeID represents a lexical scope within a function.
+type ScopeID int32
+
+const (
+ funcDupok = 1 << iota // duplicate definitions ok
+ funcWrapper // is method wrapper
+ funcNeedctxt // function uses context register (has closure variables)
+ funcReflectMethod // function calls reflect.Type.Method or MethodByName
+ // true if closure inside a function; false if a simple function or a
+ // closure in a global variable initialization
+ funcIsHiddenClosure
+ funcHasDefer // contains a defer statement
+ funcNilCheckDisabled // disable nil checks when compiling this function
+ funcInlinabilityChecked // inliner has already determined whether the function is inlinable
+ funcExportInline // include inline body in export data
+ funcInstrumentBody // add race/msan instrumentation during SSA construction
+ funcOpenCodedDeferDisallowed // can't do open-coded defers
+)
+
+func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
+func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
+func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
+func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
+func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
+func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
+func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
+func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
+func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
+func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
+func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
+
+func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
+func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
+func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
+func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
+func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
+func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
+func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
+func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
+func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
+func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
+func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
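
Editor's note: every accessor above is the same mask-test / mask-set pattern over a 16-bit field. The real bitset16 lives in gc/bitset.go (see the diffstat); the stand-in below is a minimal, self-contained sketch of the pattern, not the compiler's actual type:

package main

import "fmt"

// bitset16 is a stand-in for the compiler's 16-bit flag set.
type bitset16 uint16

// set turns the bits in mask on or off according to b.
func (f *bitset16) set(mask uint16, b bool) {
	if b {
		*(*uint16)(f) |= mask
	} else {
		*(*uint16)(f) &^= mask
	}
}

const (
	funcDupok = 1 << iota // duplicate definitions ok
	funcWrapper           // is method wrapper
)

type Func struct{ flags bitset16 }

func (f *Func) Dupok() bool     { return f.flags&funcDupok != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }

func main() {
	var f Func
	f.SetDupok(true)
	fmt.Println(f.Dupok()) // true
	f.SetDupok(false)
	fmt.Println(f.Dupok()) // false
}
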
+
+func (f *Func) setWBPos(pos src.XPos) {
+ if Debug_wb != 0 {
+ Warnl(pos, "write barrier")
+ }
+ if !f.WBPos.IsKnown() {
+ f.WBPos = pos
+ }
+}
+
+//go:generate stringer -type=Op -trimprefix=O
+
+type Op uint8
+
+// Node ops.
+const (
+ OXXX Op = iota
+
+ // names
+ ONAME // var or func name
+ // Unnamed arg or return value: f(int, string) (int, error) { etc }
+ // Also used for a qualified package identifier that hasn't been resolved yet.
+ ONONAME
+ OTYPE // type name
+ OPACK // import
+ OLITERAL // literal
+
+ // expressions
+ OADD // Left + Right
+ OSUB // Left - Right
+ OOR // Left | Right
+ OXOR // Left ^ Right
+ OADDSTR // +{List} (string addition, list elements are strings)
+ OADDR // &Left
+ OANDAND // Left && Right
+ OAPPEND // append(List); after walk, Left may contain elem type descriptor
+ OBYTES2STR // Type(Left) (Type is string, Left is a []byte)
+ OBYTES2STRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
+ ORUNES2STR // Type(Left) (Type is string, Left is a []rune)
+ OSTR2BYTES // Type(Left) (Type is []byte, Left is a string)
+ OSTR2BYTESTMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
+ OSTR2RUNES // Type(Left) (Type is []rune, Left is a string)
+ // Left = Right or (if Colas=true) Left := Right
+ // If Colas, then Ninit includes a DCL node for Left.
+ OAS
+ // List = Rlist (x, y, z = a, b, c) or (if Colas=true) List := Rlist
+ // If Colas, then Ninit includes DCL nodes for List
+ OAS2
+ OAS2DOTTYPE // List = Right (x, ok = I.(int))
+ OAS2FUNC // List = Right (x, y = f())
+ OAS2MAPR // List = Right (x, ok = m["foo"])
+ OAS2RECV // List = Right (x, ok = <-c)
+ OASOP // Left Etype= Right (x += y)
+ OCALL // Left(List) (function call, method call or type conversion)
+
+ // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+ // Prior to walk, they are: Left(List), where List is all regular arguments.
+ // After walk, List is a series of assignments to temporaries,
+ // and Rlist is an updated set of arguments.
+ // Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
+ // TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
+ OCALLFUNC // Left(List/Rlist) (function call f(args))
+ OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
+ OCALLINTER // Left(List/Rlist) (interface method call x.Method(args))
+ OCALLPART // Left.Right (method expression x.Method, not called)
+ OCAP // cap(Left)
+ OCLOSE // close(Left)
+ OCLOSURE // func Type { Func.Closure.Nbody } (func literal)
+ OCOMPLIT // Right{List} (composite literal, not yet lowered to specific form)
+ OMAPLIT // Type{List} (composite literal, Type is map)
+ OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+ OARRAYLIT // Type{List} (composite literal, Type is array)
+ OSLICELIT // Type{List} (composite literal, Type is slice) Right.Int64() = slice length.
+ OPTRLIT // &Left (left is composite literal)
+ OCONV // Type(Left) (type conversion)
+ OCONVIFACE // Type(Left) (type conversion, to interface)
+ OCONVNOP // Type(Left) (type conversion, no effect)
+ OCOPY // copy(Left, Right)
+ ODCL // var Left (declares Left of type Left.Type)
+
+ // Used during parsing but don't last.
+ ODCLFUNC // func f() or func (r) f()
+ ODCLFIELD // struct field, interface field, or func/method argument/return value.
+ ODCLCONST // const pi = 3.14
+ ODCLTYPE // type Int int or type Int = int
+
+ ODELETE // delete(List)
+ ODOT // Left.Sym (Left is of struct type)
+ ODOTPTR // Left.Sym (Left is of pointer to struct type)
+ ODOTMETH // Left.Sym (Left is non-interface, Right is method name)
+ ODOTINTER // Left.Sym (Left is interface, Right is method name)
+ OXDOT // Left.Sym (before rewrite to one of the preceding)
+ ODOTTYPE // Left.Right or Left.Type (.Right during parsing, .Type once resolved); after walk, .Right contains address of interface type descriptor and .Right.Right contains address of concrete type descriptor
+ ODOTTYPE2 // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, .Right contains address of interface type descriptor
+ OEQ // Left == Right
+ ONE // Left != Right
+ OLT // Left < Right
+ OLE // Left <= Right
+ OGE // Left >= Right
+ OGT // Left > Right
+ ODEREF // *Left
+ OINDEX // Left[Right] (index of array or slice)
+ OINDEXMAP // Left[Right] (index of map)
+ OKEY // Left:Right (key:value in struct/array/map literal)
+ OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking)
+ OLEN // len(Left)
+ OMAKE // make(List) (before type checking converts to one of the following)
+ OMAKECHAN // make(Type, Left) (type is chan)
+ OMAKEMAP // make(Type, Left) (type is map)
+ OMAKESLICE // make(Type, Left, Right) (type is slice)
+ OMAKESLICECOPY // makeslicecopy(Type, Left, Right) (type is slice; Left is the length and Right is the slice to copy from)
+ // OMAKESLICECOPY is created by the order pass and corresponds to:
+ // s = make(Type, Left); copy(s, Right)
+ //
+ // Bounded can be set on the node when Left == len(Right) is known at compile time.
+ //
+ // This node is created so the walk pass can optimize this pattern which would
+ // otherwise be hard to detect after the order pass.
+ OMUL // Left * Right
+ ODIV // Left / Right
+ OMOD // Left % Right
+ OLSH // Left << Right
+ ORSH // Left >> Right
+ OAND // Left & Right
+ OANDNOT // Left &^ Right
+ ONEW // new(Left); corresponds to calls to new in source code
+ ONEWOBJ // runtime.newobject(n.Type); introduced by walk; Left is type descriptor
+ ONOT // !Left
+ OBITNOT // ^Left
+ OPLUS // +Left
+ ONEG // -Left
+ OOROR // Left || Right
+ OPANIC // panic(Left)
+ OPRINT // print(List)
+ OPRINTN // println(List)
+ OPAREN // (Left)
+ OSEND // Left <- Right
+ OSLICE // Left[List[0] : List[1]] (Left is untypechecked or slice)
+ OSLICEARR // Left[List[0] : List[1]] (Left is array)
+ OSLICESTR // Left[List[0] : List[1]] (Left is string)
+ OSLICE3 // Left[List[0] : List[1] : List[2]] (Left is untypechecked or slice)
+ OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is array)
+ OSLICEHEADER // sliceheader{Left, List[0], List[1]} (Left is unsafe.Pointer, List[0] is length, List[1] is capacity)
+ ORECOVER // recover()
+ ORECV // <-Left
+ ORUNESTR // Type(Left) (Type is string, Left is rune)
+ OSELRECV // Left = <-Right.Left: (appears as .Left of OCASE; Right.Op == ORECV)
+ OSELRECV2 // List = <-Right.Left: (appears as .Left of OCASE; count(List) == 2, Right.Op == ORECV)
+ OIOTA // iota
+ OREAL // real(Left)
+ OIMAG // imag(Left)
+ OCOMPLEX // complex(Left, Right) or complex(List[0]) where List[0] is a 2-result function call
+ OALIGNOF // unsafe.Alignof(Left)
+ OOFFSETOF // unsafe.Offsetof(Left)
+ OSIZEOF // unsafe.Sizeof(Left)
+
+ // statements
+ OBLOCK // { List } (block of code)
+ OBREAK // break [Sym]
+ // OCASE: case List: Nbody (List==nil means default)
+ // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
+ // for nil), and, if a type-switch variable is specified, Rlist is an
+ // ONAME for the version of the type-switch variable with the specified
+ // type.
+ OCASE
+ OCONTINUE // continue [Sym]
+ ODEFER // defer Left (Left must be call)
+ OEMPTY // no-op (empty statement)
+ OFALL // fallthrough
+ OFOR // for Ninit; Left; Right { Nbody }
+ // OFORUNTIL is like OFOR, but the test (Left) is applied after the body:
+ // Ninit
+ // top: { Nbody } // Execute the body at least once
+ // cont: Right
+ // if Left { // And then test the loop condition
+ // List // Before looping to top, execute List
+ // goto top
+ // }
+ // OFORUNTIL is created by walk. There's no way to write this in Go code.
+ OFORUNTIL
+ OGOTO // goto Sym
+ OIF // if Ninit; Left { Nbody } else { Rlist }
+ OLABEL // Sym:
+ OGO // go Left (Left must be call)
+ ORANGE // for List = range Right { Nbody }
+ ORETURN // return List
+ OSELECT // select { List } (List is list of OCASE)
+ OSWITCH // switch Ninit; Left { List } (List is a list of OCASE)
+ // OTYPESW: Left := Right.(type) (appears as .Left of OSWITCH)
+ // Left is nil if there is no type-switch variable
+ OTYPESW
+
+ // types
+ OTCHAN // chan int
+ OTMAP // map[string]int
+ OTSTRUCT // struct{}
+ OTINTER // interface{}
+ // OTFUNC: func() - Left is receiver field, List is list of param fields, Rlist is
+ // list of result fields.
+ OTFUNC
+ OTARRAY // []int, [8]int, [N]int or [...]int
+
+ // misc
+ ODDD // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}.
+ OINLCALL // intermediary representation of an inlined call.
+ OEFACE // itable and data words of an empty-interface value.
+ OITAB // itable word of an interface value.
+ OIDATA // data word of an interface value in Left
+ OSPTR // base pointer of a slice or string.
+ OCLOSUREVAR // variable reference at beginning of closure function
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ OVARDEF // variable is about to be fully initialized
+ OVARKILL // variable is dead
+ OVARLIVE // variable is alive
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+
+ // arch-specific opcodes
+ ORETJMP // return to other function
+ OGETG // runtime.getg() (read g pointer)
+
+ OEND
+)
+
+// Nodes is a pointer to a slice of *Node.
+// For fields that are not used in most nodes, this is used instead of
+// a slice to save space.
+type Nodes struct{ slice *[]*Node }
+
+// asNodes returns a slice of *Node as a Nodes value.
+func asNodes(s []*Node) Nodes {
+ return Nodes{&s}
+}
+
+// Slice returns the entries in Nodes as a slice.
+// Changes to the slice entries (as in s[i] = n) will be reflected in
+// the Nodes.
+func (n Nodes) Slice() []*Node {
+ if n.slice == nil {
+ return nil
+ }
+ return *n.slice
+}
+
+// Len returns the number of entries in Nodes.
+func (n Nodes) Len() int {
+ if n.slice == nil {
+ return 0
+ }
+ return len(*n.slice)
+}
+
+// Index returns the i'th element of Nodes.
+// It panics if n does not have at least i+1 elements.
+func (n Nodes) Index(i int) *Node {
+ return (*n.slice)[i]
+}
+
+// First returns the first element of Nodes (same as n.Index(0)).
+// It panics if n has no elements.
+func (n Nodes) First() *Node {
+ return (*n.slice)[0]
+}
+
+// Second returns the second element of Nodes (same as n.Index(1)).
+// It panics if n has fewer than two elements.
+func (n Nodes) Second() *Node {
+ return (*n.slice)[1]
+}
+
+// Set sets n to a slice.
+// This takes ownership of the slice.
+func (n *Nodes) Set(s []*Node) {
+ if len(s) == 0 {
+ n.slice = nil
+ } else {
+ // Copy s into t and take the address of t rather than of s,
+ // so the slice header only escapes on this branch; the
+ // len(s) == 0 case above (over 3x more common, dynamically,
+ // for make.bash) stays allocation-free.
+ t := s
+ n.slice = &t
+ }
+}
+
+// Set1 sets n to a slice containing a single node.
+func (n *Nodes) Set1(n1 *Node) {
+ n.slice = &[]*Node{n1}
+}
+
+// Set2 sets n to a slice containing two nodes.
+func (n *Nodes) Set2(n1, n2 *Node) {
+ n.slice = &[]*Node{n1, n2}
+}
+
+// Set3 sets n to a slice containing three nodes.
+func (n *Nodes) Set3(n1, n2, n3 *Node) {
+ n.slice = &[]*Node{n1, n2, n3}
+}
+
+// MoveNodes sets n to the contents of n2, then clears n2.
+func (n *Nodes) MoveNodes(n2 *Nodes) {
+ n.slice = n2.slice
+ n2.slice = nil
+}
+
+// SetIndex sets the i'th element of Nodes to node.
+// It panics if n does not have at least i+1 elements.
+func (n Nodes) SetIndex(i int, node *Node) {
+ (*n.slice)[i] = node
+}
+
+// SetFirst sets the first element of Nodes to node.
+// It panics if n does not have at least one element.
+func (n Nodes) SetFirst(node *Node) {
+ (*n.slice)[0] = node
+}
+
+// SetSecond sets the second element of Nodes to node.
+// It panics if n does not have at least two elements.
+func (n Nodes) SetSecond(node *Node) {
+ (*n.slice)[1] = node
+}
+
+// Addr returns the address of the i'th element of Nodes.
+// It panics if n does not have at least i+1 elements.
+func (n Nodes) Addr(i int) **Node {
+ return &(*n.slice)[i]
+}
+
+// Append appends entries to Nodes.
+func (n *Nodes) Append(a ...*Node) {
+ if len(a) == 0 {
+ return
+ }
+ if n.slice == nil {
+ s := make([]*Node, len(a))
+ copy(s, a)
+ n.slice = &s
+ return
+ }
+ *n.slice = append(*n.slice, a...)
+}
+
+// Prepend prepends entries to Nodes.
+// If a slice is passed in, this will take ownership of it.
+func (n *Nodes) Prepend(a ...*Node) {
+ if len(a) == 0 {
+ return
+ }
+ if n.slice == nil {
+ n.slice = &a
+ } else {
+ *n.slice = append(a, *n.slice...)
+ }
+}
+
+// AppendNodes appends the contents of *n2 to n, then clears n2.
+func (n *Nodes) AppendNodes(n2 *Nodes) {
+ switch {
+ case n2.slice == nil:
+ case n.slice == nil:
+ n.slice = n2.slice
+ default:
+ *n.slice = append(*n.slice, *n2.slice...)
+ }
+ n2.slice = nil
+}
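
Editor's note: Nodes keeps a pointer to a slice so the common empty case costs one word and the zero value is immediately usable. A self-contained sketch of the same idea, specialized to int rather than *Node, assuming nothing beyond what the methods above show:

package main

import "fmt"

// nums mirrors Nodes: a one-word wrapper whose zero value is an empty list.
type nums struct{ slice *[]int }

func (n nums) Slice() []int {
	if n.slice == nil {
		return nil
	}
	return *n.slice
}

func (n *nums) Append(a ...int) {
	if len(a) == 0 {
		return
	}
	if n.slice == nil {
		s := make([]int, len(a))
		copy(s, a)
		n.slice = &s
		return
	}
	*n.slice = append(*n.slice, a...)
}

func main() {
	var n nums             // zero value is usable: an empty list
	fmt.Println(n.Slice()) // []
	n.Append(1, 2, 3)
	fmt.Println(n.Slice()) // [1 2 3]
}
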
+
+// inspect invokes f on each node in an AST in depth-first order.
+// If f(n) returns false, inspect skips visiting n's children.
+func inspect(n *Node, f func(*Node) bool) {
+ if n == nil || !f(n) {
+ return
+ }
+ inspectList(n.Ninit, f)
+ inspect(n.Left, f)
+ inspect(n.Right, f)
+ inspectList(n.List, f)
+ inspectList(n.Nbody, f)
+ inspectList(n.Rlist, f)
+}
+
+func inspectList(l Nodes, f func(*Node) bool) {
+ for _, n := range l.Slice() {
+ inspect(n, f)
+ }
+}
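
Editor's note: inspect is a pre-order walk where returning false from f prunes the subtree. A self-contained analogue over a tiny tree type (not the compiler's *Node) shows the pruning behavior:

package main

import "fmt"

type node struct {
	op       string
	children []*node
}

// inspect visits n and, unless f(n) returns false, its children,
// mirroring the pruning behavior of the compiler's inspect.
func inspect(n *node, f func(*node) bool) {
	if n == nil || !f(n) {
		return
	}
	for _, c := range n.children {
		inspect(c, f)
	}
}

func main() {
	tree := &node{op: "OIF", children: []*node{
		{op: "OCALLFUNC", children: []*node{{op: "ONAME"}}},
		{op: "OAS"},
	}}
	calls := 0
	inspect(tree, func(n *node) bool {
		if n.op == "OCALLFUNC" {
			calls++
			return false // don't descend into the call's children
		}
		return true
	})
	fmt.Println("calls:", calls) // calls: 1
}
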
+
+// nodeQueue is a FIFO queue of *Node. The zero value of nodeQueue is
+// a ready-to-use empty queue.
+type nodeQueue struct {
+ ring []*Node
+ head, tail int
+}
+
+// empty reports whether q contains no Nodes.
+func (q *nodeQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushRight appends n to the right of the queue.
+func (q *nodeQueue) pushRight(n *Node) {
+ if len(q.ring) == 0 {
+ q.ring = make([]*Node, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]*Node, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = n
+ q.tail++
+}
+
+// popLeft pops a node from the left of the queue. It panics if q is
+// empty.
+func (q *nodeQueue) popLeft() *Node {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ n := q.ring[q.head%len(q.ring)]
+ q.head++
+ return n
+}
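
Editor's note: pushRight grows the ring by copying the live elements (head..tail) to the front of a doubled buffer and re-basing head at 0. A self-contained analogue over int (the indexing is identical; only the element type and initial size differ) demonstrates FIFO order across one growth:

package main

import "fmt"

type intQueue struct {
	ring       []int
	head, tail int
}

func (q *intQueue) empty() bool { return q.head == q.tail }

func (q *intQueue) pushRight(n int) {
	if len(q.ring) == 0 {
		q.ring = make([]int, 4) // small initial size so the demo forces a growth
	} else if q.head+len(q.ring) == q.tail {
		// Grow: copy the live elements to the front of a doubled ring.
		nring := make([]int, len(q.ring)*2)
		part := q.ring[q.head%len(q.ring):]
		if q.tail-q.head <= len(part) {
			part = part[:q.tail-q.head]
			copy(nring, part)
		} else {
			pos := copy(nring, part)
			copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
		}
		q.ring, q.head, q.tail = nring, 0, q.tail-q.head
	}
	q.ring[q.tail%len(q.ring)] = n
	q.tail++
}

func (q *intQueue) popLeft() int {
	if q.empty() {
		panic("dequeue empty")
	}
	n := q.ring[q.head%len(q.ring)]
	q.head++
	return n
}

func main() {
	var q intQueue
	for i := 1; i <= 6; i++ { // six pushes force one growth past the initial size of 4
		q.pushRight(i)
	}
	for !q.empty() {
		fmt.Print(q.popLeft(), " ") // 1 2 3 4 5 6
	}
	fmt.Println()
}
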
+
+// NodeSet is a set of Nodes.
+type NodeSet map[*Node]struct{}
+
+// Has reports whether s contains n.
+func (s NodeSet) Has(n *Node) bool {
+ _, isPresent := s[n]
+ return isPresent
+}
+
+// Add adds n to s.
+func (s *NodeSet) Add(n *Node) {
+ if *s == nil {
+ *s = make(map[*Node]struct{})
+ }
+ (*s)[n] = struct{}{}
+}
+
+// Sorted returns s sorted according to less.
+func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
+ var res []*Node
+ for n := range s {
+ res = append(res, n)
+ }
+ sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
+ return res
+}
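
Editor's note: Sorted exists because ranging over a Go map is deliberately randomized; sorting with a caller-supplied less function restores the deterministic order the compiler needs for reproducible output. A self-contained analogue keyed by string:

package main

import (
	"fmt"
	"sort"
)

type stringSet map[string]struct{}

func (s *stringSet) Add(v string) {
	if *s == nil {
		*s = make(map[string]struct{})
	}
	(*s)[v] = struct{}{}
}

// Sorted returns the members in the order defined by less,
// hiding the randomized map iteration order.
func (s stringSet) Sorted(less func(a, b string) bool) []string {
	var res []string
	for v := range s {
		res = append(res, v)
	}
	sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
	return res
}

func main() {
	var s stringSet
	s.Add("walk")
	s.Add("order")
	s.Add("escape")
	fmt.Println(s.Sorted(func(a, b string) bool { return a < b })) // [escape order walk]
}
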
diff --git a/src/cmd/compile/internal/gc/testdata/addressed_test.go b/src/cmd/compile/internal/gc/testdata/addressed_test.go
new file mode 100644
index 0000000..cdabf97
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/addressed_test.go
@@ -0,0 +1,210 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+var output string
+
+func mypanic(t *testing.T, s string) {
+ t.Fatalf("%s\n%s", s, output)
+}
+
+func assertEqual(t *testing.T, x, y int) {
+ if x != y {
+ mypanic(t, fmt.Sprintf("assertEqual failed got %d, want %d", x, y))
+ }
+}
+
+func TestAddressed(t *testing.T) {
+ x := f1_ssa(2, 3)
+ output += fmt.Sprintln("*x is", *x)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*x is", *x)
+ assertEqual(t, *x, 9)
+
+ w := f3a_ssa(6)
+ output += fmt.Sprintln("*w is", *w)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*w is", *w)
+ assertEqual(t, *w, 6)
+
+ y := f3b_ssa(12)
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ assertEqual(t, *y.(*int), 12)
+
+ z := f3c_ssa(8)
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ assertEqual(t, *z.(*int), 8)
+
+ args(t)
+ test_autos(t)
+}
+
+//go:noinline
+func f1_ssa(x, y int) *int {
+ x = x*y + y
+ return &x
+}
+
+//go:noinline
+func f3a_ssa(x int) *int {
+ return &x
+}
+
+//go:noinline
+func f3b_ssa(x int) interface{} { // ./foo.go:15: internal error: f3b_ssa ~r1 (type interface {}) recorded as live on entry
+ return &x
+}
+
+//go:noinline
+func f3c_ssa(y int) interface{} {
+ x := y
+ return &x
+}
+
+type V struct {
+ p *V
+ w, x int64
+}
+
+func args(t *testing.T) {
+ v := V{p: nil, w: 1, x: 1}
+ a := V{p: &v, w: 2, x: 2}
+ b := V{p: &v, w: 0, x: 0}
+ i := v.args_ssa(a, b)
+ output += fmt.Sprintln("i=", i)
+ assertEqual(t, int(i), 2)
+}
+
+//go:noinline
+func (v V) args_ssa(a, b V) int64 {
+ if v.w == 0 {
+ return v.x
+ }
+ if v.w == 1 {
+ return a.x
+ }
+ if v.w == 2 {
+ return b.x
+ }
+ b.p.p = &a // v.p in caller = &a
+
+ return -1
+}
+
+func test_autos(t *testing.T) {
+ test(t, 11)
+ test(t, 12)
+ test(t, 13)
+ test(t, 21)
+ test(t, 22)
+ test(t, 23)
+ test(t, 31)
+ test(t, 32)
+}
+
+func test(t *testing.T, which int64) {
+ output += fmt.Sprintln("test", which)
+ v1 := V{w: 30, x: 3, p: nil}
+ v2, v3 := v1.autos_ssa(which, 10, 1, 20, 2)
+ if which != v2.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v2.val()=", v2.val())
+ mypanic(t, "Failure of expected V value")
+ }
+ if v2.p.val() != v3.val() {
+ output += fmt.Sprintln("Expected v2.p.val()=", v2.p.val(), "got v3.val()=", v3.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+ if which != v3.p.p.p.p.p.p.p.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v3.p.p.p.p.p.p.p.val()=", v3.p.p.p.p.p.p.p.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+}
+
+func (v V) val() int64 {
+ return v.w + v.x
+}
+
+// autos_ssa uses the contents of v and parameters w1, w2, x1, x2
+// to initialize a bunch of locals, all of which have their
+// address taken to force heap allocation, and then, based on
+// the value of which, a pair of those locals is copied in
+// various ways to the two results y and z, which are also
+// addressed. which is expected to be one of 11-13, 21-23, 31, 32;
+// y.val() should be equal to which and y.p.val() should
+// be equal to z.val(). Also, x(.p)**8 == x; that is, the
+// autos are all linked into a ring.
+//go:noinline
+func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
+ fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing
+ var a, b, c, d, e, f, g, h V
+ fill_ssa(w1, x1, &a, &b)
+ fill_ssa(w1, x2, &b, &c)
+ fill_ssa(w1, v.x, &c, &d)
+ fill_ssa(w2, x1, &d, &e)
+ fill_ssa(w2, x2, &e, &f)
+ fill_ssa(w2, v.x, &f, &g)
+ fill_ssa(v.w, x1, &g, &h)
+ fill_ssa(v.w, x2, &h, &a)
+ switch which {
+ case 11:
+ y = a
+ z.getsI(&b)
+ case 12:
+ y.gets(&b)
+ z = c
+ case 13:
+ y.gets(&c)
+ z = d
+ case 21:
+ y.getsI(&d)
+ z.gets(&e)
+ case 22:
+ y = e
+ z = f
+ case 23:
+ y.gets(&f)
+ z.getsI(&g)
+ case 31:
+ y = g
+ z.gets(&h)
+ case 32:
+ y.getsI(&h)
+ z = a
+ default:
+
+ panic("")
+ }
+ return
+}
+
+// gets is an address-mentioning way of implementing
+// structure assignment.
+//go:noinline
+func (to *V) gets(from *V) {
+ *to = *from
+}
+
+// getsI is an address-and-interface-mentioning way of
+// implementing structure assignment.
+//go:noinline
+func (to *V) getsI(from interface{}) {
+ *to = *from.(*V)
+}
+
+// fill_ssa initializes r with V{w:w, x:x, p:p}
+//go:noinline
+func fill_ssa(w, x int64, r, p *V) {
+ *r = V{w: w, x: x, p: p}
+}
diff --git a/src/cmd/compile/internal/gc/testdata/append_test.go b/src/cmd/compile/internal/gc/testdata/append_test.go
new file mode 100644
index 0000000..6663ce7
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/append_test.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// append_test.go tests append operations.
+package main
+
+import "testing"
+
+//go:noinline
+func appendOne_ssa(a []int, x int) []int {
+ return append(a, x)
+}
+
+//go:noinline
+func appendThree_ssa(a []int, x, y, z int) []int {
+ return append(a, x, y, z)
+}
+
+func eqBytes(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func expect(t *testing.T, got, want []int) {
+ if eqBytes(got, want) {
+ return
+ }
+ t.Errorf("expected %v, got %v\n", want, got)
+}
+
+func testAppend(t *testing.T) {
+ var store [7]int
+ a := store[:0]
+
+ a = appendOne_ssa(a, 1)
+ expect(t, a, []int{1})
+ a = appendThree_ssa(a, 2, 3, 4)
+ expect(t, a, []int{1, 2, 3, 4})
+ a = appendThree_ssa(a, 5, 6, 7)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7})
+ if &a[0] != &store[0] {
+ t.Errorf("unnecessary grow")
+ }
+ a = appendOne_ssa(a, 8)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7, 8})
+ if &a[0] == &store[0] {
+ t.Errorf("didn't grow")
+ }
+}
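
Editor's note: the &a[0] comparisons above work because append only allocates a new backing array once capacity is exhausted; until then its result aliases the original storage. A minimal standalone illustration of that behavior:

package main

import "fmt"

func main() {
	var store [3]int
	a := store[:0]
	a = append(a, 1, 2, 3)          // still fits within cap(store)
	fmt.Println(&a[0] == &store[0]) // true: same backing array
	a = append(a, 4)                // exceeds cap(store): append allocates
	fmt.Println(&a[0] == &store[0]) // false: grew into new storage
}
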
+
+func TestAppend(t *testing.T) {
+ testAppend(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/arithBoundary_test.go b/src/cmd/compile/internal/gc/testdata/arithBoundary_test.go
new file mode 100644
index 0000000..777b7cd
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/arithBoundary_test.go
@@ -0,0 +1,694 @@
+// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type utd64 struct {
+ a, b uint64
+ add, sub, mul, div, mod uint64
+}
+type itd64 struct {
+ a, b int64
+ add, sub, mul, div, mod int64
+}
+type utd32 struct {
+ a, b uint32
+ add, sub, mul, div, mod uint32
+}
+type itd32 struct {
+ a, b int32
+ add, sub, mul, div, mod int32
+}
+type utd16 struct {
+ a, b uint16
+ add, sub, mul, div, mod uint16
+}
+type itd16 struct {
+ a, b int16
+ add, sub, mul, div, mod int16
+}
+type utd8 struct {
+ a, b uint8
+ add, sub, mul, div, mod uint8
+}
+type itd8 struct {
+ a, b int8
+ add, sub, mul, div, mod int8
+}
+
+//go:noinline
+func add_uint64_ssa(a, b uint64) uint64 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint64_ssa(a, b uint64) uint64 {
+ return a - b
+}
+
+//go:noinline
+func div_uint64_ssa(a, b uint64) uint64 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint64_ssa(a, b uint64) uint64 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint64_ssa(a, b uint64) uint64 {
+ return a * b
+}
+
+//go:noinline
+func add_int64_ssa(a, b int64) int64 {
+ return a + b
+}
+
+//go:noinline
+func sub_int64_ssa(a, b int64) int64 {
+ return a - b
+}
+
+//go:noinline
+func div_int64_ssa(a, b int64) int64 {
+ return a / b
+}
+
+//go:noinline
+func mod_int64_ssa(a, b int64) int64 {
+ return a % b
+}
+
+//go:noinline
+func mul_int64_ssa(a, b int64) int64 {
+ return a * b
+}
+
+//go:noinline
+func add_uint32_ssa(a, b uint32) uint32 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint32_ssa(a, b uint32) uint32 {
+ return a - b
+}
+
+//go:noinline
+func div_uint32_ssa(a, b uint32) uint32 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint32_ssa(a, b uint32) uint32 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint32_ssa(a, b uint32) uint32 {
+ return a * b
+}
+
+//go:noinline
+func add_int32_ssa(a, b int32) int32 {
+ return a + b
+}
+
+//go:noinline
+func sub_int32_ssa(a, b int32) int32 {
+ return a - b
+}
+
+//go:noinline
+func div_int32_ssa(a, b int32) int32 {
+ return a / b
+}
+
+//go:noinline
+func mod_int32_ssa(a, b int32) int32 {
+ return a % b
+}
+
+//go:noinline
+func mul_int32_ssa(a, b int32) int32 {
+ return a * b
+}
+
+//go:noinline
+func add_uint16_ssa(a, b uint16) uint16 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint16_ssa(a, b uint16) uint16 {
+ return a - b
+}
+
+//go:noinline
+func div_uint16_ssa(a, b uint16) uint16 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint16_ssa(a, b uint16) uint16 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint16_ssa(a, b uint16) uint16 {
+ return a * b
+}
+
+//go:noinline
+func add_int16_ssa(a, b int16) int16 {
+ return a + b
+}
+
+//go:noinline
+func sub_int16_ssa(a, b int16) int16 {
+ return a - b
+}
+
+//go:noinline
+func div_int16_ssa(a, b int16) int16 {
+ return a / b
+}
+
+//go:noinline
+func mod_int16_ssa(a, b int16) int16 {
+ return a % b
+}
+
+//go:noinline
+func mul_int16_ssa(a, b int16) int16 {
+ return a * b
+}
+
+//go:noinline
+func add_uint8_ssa(a, b uint8) uint8 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint8_ssa(a, b uint8) uint8 {
+ return a - b
+}
+
+//go:noinline
+func div_uint8_ssa(a, b uint8) uint8 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint8_ssa(a, b uint8) uint8 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint8_ssa(a, b uint8) uint8 {
+ return a * b
+}
+
+//go:noinline
+func add_int8_ssa(a, b int8) int8 {
+ return a + b
+}
+
+//go:noinline
+func sub_int8_ssa(a, b int8) int8 {
+ return a - b
+}
+
+//go:noinline
+func div_int8_ssa(a, b int8) int8 {
+ return a / b
+}
+
+//go:noinline
+func mod_int8_ssa(a, b int8) int8 {
+ return a % b
+}
+
+//go:noinline
+func mul_int8_ssa(a, b int8) int8 {
+ return a * b
+}
+
+var uint64_data []utd64 = []utd64{utd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd64{a: 0, b: 1, add: 1, sub: 18446744073709551615, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 4294967296, add: 4294967296, sub: 18446744069414584320, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 18446744073709551615, add: 18446744073709551615, sub: 1, mul: 0, div: 0, mod: 0},
+ utd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd64{a: 1, b: 4294967296, add: 4294967297, sub: 18446744069414584321, mul: 4294967296, div: 0, mod: 1},
+ utd64{a: 1, b: 18446744073709551615, add: 0, sub: 2, mul: 18446744073709551615, div: 0, mod: 1},
+ utd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ utd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ utd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ utd64{a: 4294967296, b: 18446744073709551615, add: 4294967295, sub: 4294967297, mul: 18446744069414584320, div: 0, mod: 4294967296},
+ utd64{a: 18446744073709551615, b: 0, add: 18446744073709551615, sub: 18446744073709551615, mul: 0},
+ utd64{a: 18446744073709551615, b: 1, add: 0, sub: 18446744073709551614, mul: 18446744073709551615, div: 18446744073709551615, mod: 0},
+ utd64{a: 18446744073709551615, b: 4294967296, add: 4294967295, sub: 18446744069414584319, mul: 18446744069414584320, div: 4294967295, mod: 4294967295},
+ utd64{a: 18446744073709551615, b: 18446744073709551615, add: 18446744073709551614, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int64_data []itd64 = []itd64{itd64{a: -9223372036854775808, b: -9223372036854775808, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -9223372036854775808, b: -9223372036854775807, add: 1, sub: -1, mul: -9223372036854775808, div: 1, mod: -1},
+ itd64{a: -9223372036854775808, b: -4294967296, add: 9223372032559808512, sub: -9223372032559808512, mul: 0, div: 2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: -1, add: 9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 0, add: -9223372036854775808, sub: -9223372036854775808, mul: 0},
+ itd64{a: -9223372036854775808, b: 1, add: -9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 4294967296, add: -9223372032559808512, sub: 9223372032559808512, mul: 0, div: -2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: 9223372036854775806, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd64{a: -9223372036854775808, b: 9223372036854775807, add: -1, sub: 1, mul: -9223372036854775808, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: -9223372036854775808, add: 1, sub: 1, mul: -9223372036854775808, div: 0, mod: -9223372036854775807},
+ itd64{a: -9223372036854775807, b: -9223372036854775807, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -9223372036854775807, b: -4294967296, add: 9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: -1, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 0, add: -9223372036854775807, sub: -9223372036854775807, mul: 0},
+ itd64{a: -9223372036854775807, b: 1, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 4294967296, add: -9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: -2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: 9223372036854775806, add: -1, sub: 3, mul: 9223372036854775806, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: 9223372036854775807, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: -4294967296, b: -9223372036854775808, add: 9223372032559808512, sub: 9223372032559808512, mul: 0, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -9223372036854775807, add: 9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -4294967296, add: -8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -4294967296, b: -1, add: -4294967297, sub: -4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: -4294967296, b: 0, add: -4294967296, sub: -4294967296, mul: 0},
+ itd64{a: -4294967296, b: 1, add: -4294967295, sub: -4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: -4294967296, b: 4294967296, add: 0, sub: -8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: -4294967296, b: 9223372036854775806, add: 9223372032559808510, sub: 9223372032559808514, mul: 8589934592, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: 9223372036854775807, add: 9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: 0, mod: -4294967296},
+ itd64{a: -1, b: -9223372036854775808, add: 9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 0, mod: -1},
+ itd64{a: -1, b: -9223372036854775807, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 0, mod: -1},
+ itd64{a: -1, b: -4294967296, add: -4294967297, sub: 4294967295, mul: 4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd64{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: -1, b: 4294967296, add: 4294967295, sub: -4294967297, mul: -4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775806, add: 9223372036854775805, sub: -9223372036854775807, mul: -9223372036854775806, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775807, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: -1},
+ itd64{a: 0, b: -9223372036854775808, add: -9223372036854775808, sub: -9223372036854775808, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -9223372036854775807, add: -9223372036854775807, sub: 9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -4294967296, add: -4294967296, sub: 4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd64{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 4294967296, add: 4294967296, sub: -4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775806, add: 9223372036854775806, sub: -9223372036854775806, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775807, add: 9223372036854775807, sub: -9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 1, b: -9223372036854775808, add: -9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: 0, mod: 1},
+ itd64{a: 1, b: -9223372036854775807, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: 1},
+ itd64{a: 1, b: -4294967296, add: -4294967295, sub: 4294967297, mul: -4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: 1, b: 4294967296, add: 4294967297, sub: -4294967295, mul: 4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775806, add: 9223372036854775807, sub: -9223372036854775805, mul: 9223372036854775806, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775807, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 0, mod: 1},
+ itd64{a: 4294967296, b: -9223372036854775808, add: -9223372032559808512, sub: -9223372032559808512, mul: 0, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -9223372036854775807, add: -9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -4294967296, add: 0, sub: 8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: 4294967296, b: -1, add: 4294967295, sub: 4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ itd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: 4294967296, b: 9223372036854775806, add: -9223372032559808514, sub: -9223372032559808510, mul: -8589934592, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: 9223372036854775807, add: -9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 0, mod: 4294967296},
+ itd64{a: 9223372036854775806, b: -9223372036854775808, add: -2, sub: -2, mul: 0, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -9223372036854775807, add: -1, sub: -3, mul: 9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -4294967296, add: 9223372032559808510, sub: -9223372032559808514, mul: 8589934592, div: -2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: -1, add: 9223372036854775805, sub: 9223372036854775807, mul: -9223372036854775806, div: -9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 0, add: 9223372036854775806, sub: 9223372036854775806, mul: 0},
+ itd64{a: 9223372036854775806, b: 1, add: 9223372036854775807, sub: 9223372036854775805, mul: 9223372036854775806, div: 9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 4294967296, add: -9223372032559808514, sub: 9223372032559808510, mul: -8589934592, div: 2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: 9223372036854775806, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd64{a: 9223372036854775806, b: 9223372036854775807, add: -3, sub: -1, mul: -9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775807, b: -9223372036854775808, add: -1, sub: -1, mul: -9223372036854775808, div: 0, mod: 9223372036854775807},
+ itd64{a: 9223372036854775807, b: -9223372036854775807, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: 9223372036854775807, b: -4294967296, add: 9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: -2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: -1, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 0, add: 9223372036854775807, sub: 9223372036854775807, mul: 0},
+ itd64{a: 9223372036854775807, b: 1, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 4294967296, add: -9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: 9223372036854775806, add: -3, sub: 1, mul: -9223372036854775806, div: 1, mod: 1},
+ itd64{a: 9223372036854775807, b: 9223372036854775807, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint32_data []utd32 = []utd32{utd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd32{a: 0, b: 1, add: 1, sub: 4294967295, mul: 0, div: 0, mod: 0},
+ utd32{a: 0, b: 4294967295, add: 4294967295, sub: 1, mul: 0, div: 0, mod: 0},
+ utd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd32{a: 1, b: 4294967295, add: 0, sub: 2, mul: 4294967295, div: 0, mod: 1},
+ utd32{a: 4294967295, b: 0, add: 4294967295, sub: 4294967295, mul: 0},
+ utd32{a: 4294967295, b: 1, add: 0, sub: 4294967294, mul: 4294967295, div: 4294967295, mod: 0},
+ utd32{a: 4294967295, b: 4294967295, add: 4294967294, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int32_data []itd32 = []itd32{itd32{a: -2147483648, b: -2147483648, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd32{a: -2147483648, b: -2147483647, add: 1, sub: -1, mul: -2147483648, div: 1, mod: -1},
+ itd32{a: -2147483648, b: -1, add: 2147483647, sub: -2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 0, add: -2147483648, sub: -2147483648, mul: 0},
+ itd32{a: -2147483648, b: 1, add: -2147483647, sub: 2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 2147483647, add: -1, sub: 1, mul: -2147483648, div: -1, mod: -1},
+ itd32{a: -2147483647, b: -2147483648, add: 1, sub: 1, mul: -2147483648, div: 0, mod: -2147483647},
+ itd32{a: -2147483647, b: -2147483647, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -2147483647, b: -1, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: -2147483647, b: 0, add: -2147483647, sub: -2147483647, mul: 0},
+ itd32{a: -2147483647, b: 1, add: -2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: -2147483647, b: 2147483647, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: -2147483648, add: 2147483647, sub: 2147483647, mul: -2147483648, div: 0, mod: -1},
+ itd32{a: -1, b: -2147483647, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 0, mod: -1},
+ itd32{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd32{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: 2147483647, add: 2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: -1},
+ itd32{a: 0, b: -2147483648, add: -2147483648, sub: -2147483648, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -2147483647, add: -2147483647, sub: 2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd32{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 2147483647, add: 2147483647, sub: -2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 1, b: -2147483648, add: -2147483647, sub: -2147483647, mul: -2147483648, div: 0, mod: 1},
+ itd32{a: 1, b: -2147483647, add: -2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: 1},
+ itd32{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: 1, b: 2147483647, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 0, mod: 1},
+ itd32{a: 2147483647, b: -2147483648, add: -1, sub: -1, mul: -2147483648, div: 0, mod: 2147483647},
+ itd32{a: 2147483647, b: -2147483647, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: 2147483647, b: -1, add: 2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: 2147483647, b: 0, add: 2147483647, sub: 2147483647, mul: 0},
+ itd32{a: 2147483647, b: 1, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: 2147483647, b: 2147483647, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint16_data []utd16 = []utd16{utd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd16{a: 0, b: 1, add: 1, sub: 65535, mul: 0, div: 0, mod: 0},
+ utd16{a: 0, b: 65535, add: 65535, sub: 1, mul: 0, div: 0, mod: 0},
+ utd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd16{a: 1, b: 65535, add: 0, sub: 2, mul: 65535, div: 0, mod: 1},
+ utd16{a: 65535, b: 0, add: 65535, sub: 65535, mul: 0},
+ utd16{a: 65535, b: 1, add: 0, sub: 65534, mul: 65535, div: 65535, mod: 0},
+ utd16{a: 65535, b: 65535, add: 65534, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int16_data []itd16 = []itd16{itd16{a: -32768, b: -32768, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd16{a: -32768, b: -32767, add: 1, sub: -1, mul: -32768, div: 1, mod: -1},
+ itd16{a: -32768, b: -1, add: 32767, sub: -32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 0, add: -32768, sub: -32768, mul: 0},
+ itd16{a: -32768, b: 1, add: -32767, sub: 32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 32766, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd16{a: -32768, b: 32767, add: -1, sub: 1, mul: -32768, div: -1, mod: -1},
+ itd16{a: -32767, b: -32768, add: 1, sub: 1, mul: -32768, div: 0, mod: -32767},
+ itd16{a: -32767, b: -32767, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -32767, b: -1, add: -32768, sub: -32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: -32767, b: 0, add: -32767, sub: -32767, mul: 0},
+ itd16{a: -32767, b: 1, add: -32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: -32767, b: 32766, add: -1, sub: 3, mul: 32766, div: -1, mod: -1},
+ itd16{a: -32767, b: 32767, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: -32768, add: 32767, sub: 32767, mul: -32768, div: 0, mod: -1},
+ itd16{a: -1, b: -32767, add: -32768, sub: 32766, mul: 32767, div: 0, mod: -1},
+ itd16{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd16{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: 32766, add: 32765, sub: -32767, mul: -32766, div: 0, mod: -1},
+ itd16{a: -1, b: 32767, add: 32766, sub: -32768, mul: -32767, div: 0, mod: -1},
+ itd16{a: 0, b: -32768, add: -32768, sub: -32768, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -32767, add: -32767, sub: 32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd16{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32766, add: 32766, sub: -32766, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32767, add: 32767, sub: -32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 1, b: -32768, add: -32767, sub: -32767, mul: -32768, div: 0, mod: 1},
+ itd16{a: 1, b: -32767, add: -32766, sub: -32768, mul: -32767, div: 0, mod: 1},
+ itd16{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: 1, b: 32766, add: 32767, sub: -32765, mul: 32766, div: 0, mod: 1},
+ itd16{a: 1, b: 32767, add: -32768, sub: -32766, mul: 32767, div: 0, mod: 1},
+ itd16{a: 32766, b: -32768, add: -2, sub: -2, mul: 0, div: 0, mod: 32766},
+ itd16{a: 32766, b: -32767, add: -1, sub: -3, mul: 32766, div: 0, mod: 32766},
+ itd16{a: 32766, b: -1, add: 32765, sub: 32767, mul: -32766, div: -32766, mod: 0},
+ itd16{a: 32766, b: 0, add: 32766, sub: 32766, mul: 0},
+ itd16{a: 32766, b: 1, add: 32767, sub: 32765, mul: 32766, div: 32766, mod: 0},
+ itd16{a: 32766, b: 32766, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd16{a: 32766, b: 32767, add: -3, sub: -1, mul: -32766, div: 0, mod: 32766},
+ itd16{a: 32767, b: -32768, add: -1, sub: -1, mul: -32768, div: 0, mod: 32767},
+ itd16{a: 32767, b: -32767, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: 32767, b: -1, add: 32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: 32767, b: 0, add: 32767, sub: 32767, mul: 0},
+ itd16{a: 32767, b: 1, add: -32768, sub: 32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: 32767, b: 32766, add: -3, sub: 1, mul: -32766, div: 1, mod: 1},
+ itd16{a: 32767, b: 32767, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint8_data []utd8 = []utd8{utd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd8{a: 0, b: 1, add: 1, sub: 255, mul: 0, div: 0, mod: 0},
+ utd8{a: 0, b: 255, add: 255, sub: 1, mul: 0, div: 0, mod: 0},
+ utd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd8{a: 1, b: 255, add: 0, sub: 2, mul: 255, div: 0, mod: 1},
+ utd8{a: 255, b: 0, add: 255, sub: 255, mul: 0},
+ utd8{a: 255, b: 1, add: 0, sub: 254, mul: 255, div: 255, mod: 0},
+ utd8{a: 255, b: 255, add: 254, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int8_data []itd8 = []itd8{itd8{a: -128, b: -128, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd8{a: -128, b: -127, add: 1, sub: -1, mul: -128, div: 1, mod: -1},
+ itd8{a: -128, b: -1, add: 127, sub: -127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 0, add: -128, sub: -128, mul: 0},
+ itd8{a: -128, b: 1, add: -127, sub: 127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 126, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd8{a: -128, b: 127, add: -1, sub: 1, mul: -128, div: -1, mod: -1},
+ itd8{a: -127, b: -128, add: 1, sub: 1, mul: -128, div: 0, mod: -127},
+ itd8{a: -127, b: -127, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -127, b: -1, add: -128, sub: -126, mul: 127, div: 127, mod: 0},
+ itd8{a: -127, b: 0, add: -127, sub: -127, mul: 0},
+ itd8{a: -127, b: 1, add: -126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: -127, b: 126, add: -1, sub: 3, mul: 126, div: -1, mod: -1},
+ itd8{a: -127, b: 127, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: -128, add: 127, sub: 127, mul: -128, div: 0, mod: -1},
+ itd8{a: -1, b: -127, add: -128, sub: 126, mul: 127, div: 0, mod: -1},
+ itd8{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd8{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: 126, add: 125, sub: -127, mul: -126, div: 0, mod: -1},
+ itd8{a: -1, b: 127, add: 126, sub: -128, mul: -127, div: 0, mod: -1},
+ itd8{a: 0, b: -128, add: -128, sub: -128, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -127, add: -127, sub: 127, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd8{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 126, add: 126, sub: -126, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 127, add: 127, sub: -127, mul: 0, div: 0, mod: 0},
+ itd8{a: 1, b: -128, add: -127, sub: -127, mul: -128, div: 0, mod: 1},
+ itd8{a: 1, b: -127, add: -126, sub: -128, mul: -127, div: 0, mod: 1},
+ itd8{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: 1, b: 126, add: 127, sub: -125, mul: 126, div: 0, mod: 1},
+ itd8{a: 1, b: 127, add: -128, sub: -126, mul: 127, div: 0, mod: 1},
+ itd8{a: 126, b: -128, add: -2, sub: -2, mul: 0, div: 0, mod: 126},
+ itd8{a: 126, b: -127, add: -1, sub: -3, mul: 126, div: 0, mod: 126},
+ itd8{a: 126, b: -1, add: 125, sub: 127, mul: -126, div: -126, mod: 0},
+ itd8{a: 126, b: 0, add: 126, sub: 126, mul: 0},
+ itd8{a: 126, b: 1, add: 127, sub: 125, mul: 126, div: 126, mod: 0},
+ itd8{a: 126, b: 126, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd8{a: 126, b: 127, add: -3, sub: -1, mul: -126, div: 0, mod: 126},
+ itd8{a: 127, b: -128, add: -1, sub: -1, mul: -128, div: 0, mod: 127},
+ itd8{a: 127, b: -127, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: 127, b: -1, add: 126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: 127, b: 0, add: 127, sub: 127, mul: 0},
+ itd8{a: 127, b: 1, add: -128, sub: 126, mul: 127, div: 127, mod: 0},
+ itd8{a: 127, b: 126, add: -3, sub: 1, mul: -126, div: 1, mod: 1},
+ itd8{a: 127, b: 127, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+
+// TestArithmeticBoundary tests boundary results for arithmetic operations.
+func TestArithmeticBoundary(t *testing.T) {
+
+ for _, v := range uint64_data {
+ if got := add_uint64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int64_data {
+ if got := add_int64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint32_data {
+ if got := add_uint32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int32_data {
+ if got := add_int32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint16_data {
+ if got := add_uint16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int16_data {
+ if got := add_int16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint8_data {
+ if got := add_uint8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int8_data {
+ if got := add_int8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/arithConst_test.go b/src/cmd/compile/internal/gc/testdata/arithConst_test.go
new file mode 100644
index 0000000..9f5ac61
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/arithConst_test.go
@@ -0,0 +1,9570 @@
+// Code generated by gen/arithConstGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+//go:noinline
+func add_uint64_0(a uint64) uint64 { return a + 0 }
+
+//go:noinline
+func add_0_uint64(a uint64) uint64 { return 0 + a }
+
+//go:noinline
+func add_uint64_1(a uint64) uint64 { return a + 1 }
+
+//go:noinline
+func add_1_uint64(a uint64) uint64 { return 1 + a }
+
+//go:noinline
+func add_uint64_4294967296(a uint64) uint64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_uint64(a uint64) uint64 { return 4294967296 + a }
+
+//go:noinline
+func add_uint64_9223372036854775808(a uint64) uint64 { return a + 9223372036854775808 }
+
+//go:noinline
+func add_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 + a }
+
+//go:noinline
+func add_uint64_18446744073709551615(a uint64) uint64 { return a + 18446744073709551615 }
+
+//go:noinline
+func add_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 + a }
+
+//go:noinline
+func sub_uint64_0(a uint64) uint64 { return a - 0 }
+
+//go:noinline
+func sub_0_uint64(a uint64) uint64 { return 0 - a }
+
+//go:noinline
+func sub_uint64_1(a uint64) uint64 { return a - 1 }
+
+//go:noinline
+func sub_1_uint64(a uint64) uint64 { return 1 - a }
+
+//go:noinline
+func sub_uint64_4294967296(a uint64) uint64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_uint64(a uint64) uint64 { return 4294967296 - a }
+
+//go:noinline
+func sub_uint64_9223372036854775808(a uint64) uint64 { return a - 9223372036854775808 }
+
+//go:noinline
+func sub_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 - a }
+
+//go:noinline
+func sub_uint64_18446744073709551615(a uint64) uint64 { return a - 18446744073709551615 }
+
+//go:noinline
+func sub_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 - a }
+
+//go:noinline
+func div_0_uint64(a uint64) uint64 { return 0 / a }
+
+//go:noinline
+func div_uint64_1(a uint64) uint64 { return a / 1 }
+
+//go:noinline
+func div_1_uint64(a uint64) uint64 { return 1 / a }
+
+//go:noinline
+func div_uint64_4294967296(a uint64) uint64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_uint64(a uint64) uint64 { return 4294967296 / a }
+
+//go:noinline
+func div_uint64_9223372036854775808(a uint64) uint64 { return a / 9223372036854775808 }
+
+//go:noinline
+func div_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 / a }
+
+//go:noinline
+func div_uint64_18446744073709551615(a uint64) uint64 { return a / 18446744073709551615 }
+
+//go:noinline
+func div_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 / a }
+
+//go:noinline
+func mul_uint64_0(a uint64) uint64 { return a * 0 }
+
+//go:noinline
+func mul_0_uint64(a uint64) uint64 { return 0 * a }
+
+//go:noinline
+func mul_uint64_1(a uint64) uint64 { return a * 1 }
+
+//go:noinline
+func mul_1_uint64(a uint64) uint64 { return 1 * a }
+
+//go:noinline
+func mul_uint64_4294967296(a uint64) uint64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_uint64(a uint64) uint64 { return 4294967296 * a }
+
+//go:noinline
+func mul_uint64_9223372036854775808(a uint64) uint64 { return a * 9223372036854775808 }
+
+//go:noinline
+func mul_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 * a }
+
+//go:noinline
+func mul_uint64_18446744073709551615(a uint64) uint64 { return a * 18446744073709551615 }
+
+//go:noinline
+func mul_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 * a }
+
+//go:noinline
+func lsh_uint64_0(a uint64) uint64 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint64(a uint64) uint64 { return 0 << a }
+
+//go:noinline
+func lsh_uint64_1(a uint64) uint64 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint64(a uint64) uint64 { return 1 << a }
+
+//go:noinline
+func lsh_uint64_4294967296(a uint64) uint64 { return a << uint64(4294967296) }
+
+//go:noinline
+func lsh_4294967296_uint64(a uint64) uint64 { return 4294967296 << a }
+
+//go:noinline
+func lsh_uint64_9223372036854775808(a uint64) uint64 { return a << uint64(9223372036854775808) }
+
+//go:noinline
+func lsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 << a }
+
+//go:noinline
+func lsh_uint64_18446744073709551615(a uint64) uint64 { return a << uint64(18446744073709551615) }
+
+//go:noinline
+func lsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 << a }
+
+//go:noinline
+func rsh_uint64_0(a uint64) uint64 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint64(a uint64) uint64 { return 0 >> a }
+
+//go:noinline
+func rsh_uint64_1(a uint64) uint64 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint64(a uint64) uint64 { return 1 >> a }
+
+//go:noinline
+func rsh_uint64_4294967296(a uint64) uint64 { return a >> uint64(4294967296) }
+
+//go:noinline
+func rsh_4294967296_uint64(a uint64) uint64 { return 4294967296 >> a }
+
+//go:noinline
+func rsh_uint64_9223372036854775808(a uint64) uint64 { return a >> uint64(9223372036854775808) }
+
+//go:noinline
+func rsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 >> a }
+
+//go:noinline
+func rsh_uint64_18446744073709551615(a uint64) uint64 { return a >> uint64(18446744073709551615) }
+
+//go:noinline
+func rsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 >> a }
+
+//go:noinline
+func mod_0_uint64(a uint64) uint64 { return 0 % a }
+
+//go:noinline
+func mod_uint64_1(a uint64) uint64 { return a % 1 }
+
+//go:noinline
+func mod_1_uint64(a uint64) uint64 { return 1 % a }
+
+//go:noinline
+func mod_uint64_4294967296(a uint64) uint64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_uint64(a uint64) uint64 { return 4294967296 % a }
+
+//go:noinline
+func mod_uint64_9223372036854775808(a uint64) uint64 { return a % 9223372036854775808 }
+
+//go:noinline
+func mod_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 % a }
+
+//go:noinline
+func mod_uint64_18446744073709551615(a uint64) uint64 { return a % 18446744073709551615 }
+
+//go:noinline
+func mod_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 % a }
+
+//go:noinline
+func and_uint64_0(a uint64) uint64 { return a & 0 }
+
+//go:noinline
+func and_0_uint64(a uint64) uint64 { return 0 & a }
+
+//go:noinline
+func and_uint64_1(a uint64) uint64 { return a & 1 }
+
+//go:noinline
+func and_1_uint64(a uint64) uint64 { return 1 & a }
+
+//go:noinline
+func and_uint64_4294967296(a uint64) uint64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_uint64(a uint64) uint64 { return 4294967296 & a }
+
+//go:noinline
+func and_uint64_9223372036854775808(a uint64) uint64 { return a & 9223372036854775808 }
+
+//go:noinline
+func and_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 & a }
+
+//go:noinline
+func and_uint64_18446744073709551615(a uint64) uint64 { return a & 18446744073709551615 }
+
+//go:noinline
+func and_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 & a }
+
+//go:noinline
+func or_uint64_0(a uint64) uint64 { return a | 0 }
+
+//go:noinline
+func or_0_uint64(a uint64) uint64 { return 0 | a }
+
+//go:noinline
+func or_uint64_1(a uint64) uint64 { return a | 1 }
+
+//go:noinline
+func or_1_uint64(a uint64) uint64 { return 1 | a }
+
+//go:noinline
+func or_uint64_4294967296(a uint64) uint64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_uint64(a uint64) uint64 { return 4294967296 | a }
+
+//go:noinline
+func or_uint64_9223372036854775808(a uint64) uint64 { return a | 9223372036854775808 }
+
+//go:noinline
+func or_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 | a }
+
+//go:noinline
+func or_uint64_18446744073709551615(a uint64) uint64 { return a | 18446744073709551615 }
+
+//go:noinline
+func or_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 | a }
+
+//go:noinline
+func xor_uint64_0(a uint64) uint64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint64(a uint64) uint64 { return 0 ^ a }
+
+//go:noinline
+func xor_uint64_1(a uint64) uint64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint64(a uint64) uint64 { return 1 ^ a }
+
+//go:noinline
+func xor_uint64_4294967296(a uint64) uint64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_uint64(a uint64) uint64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_uint64_9223372036854775808(a uint64) uint64 { return a ^ 9223372036854775808 }
+
+//go:noinline
+func xor_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 ^ a }
+
+//go:noinline
+func xor_uint64_18446744073709551615(a uint64) uint64 { return a ^ 18446744073709551615 }
+
+//go:noinline
+func xor_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 ^ a }
+
+//go:noinline
+func mul_uint64_3(a uint64) uint64 { return a * 3 }
+
+//go:noinline
+func mul_3_uint64(a uint64) uint64 { return 3 * a }
+
+//go:noinline
+func mul_uint64_5(a uint64) uint64 { return a * 5 }
+
+//go:noinline
+func mul_5_uint64(a uint64) uint64 { return 5 * a }
+
+//go:noinline
+func mul_uint64_7(a uint64) uint64 { return a * 7 }
+
+//go:noinline
+func mul_7_uint64(a uint64) uint64 { return 7 * a }
+
+//go:noinline
+func mul_uint64_9(a uint64) uint64 { return a * 9 }
+
+//go:noinline
+func mul_9_uint64(a uint64) uint64 { return 9 * a }
+
+//go:noinline
+func mul_uint64_10(a uint64) uint64 { return a * 10 }
+
+//go:noinline
+func mul_10_uint64(a uint64) uint64 { return 10 * a }
+
+//go:noinline
+func mul_uint64_11(a uint64) uint64 { return a * 11 }
+
+//go:noinline
+func mul_11_uint64(a uint64) uint64 { return 11 * a }
+
+//go:noinline
+func mul_uint64_13(a uint64) uint64 { return a * 13 }
+
+//go:noinline
+func mul_13_uint64(a uint64) uint64 { return 13 * a }
+
+//go:noinline
+func mul_uint64_19(a uint64) uint64 { return a * 19 }
+
+//go:noinline
+func mul_19_uint64(a uint64) uint64 { return 19 * a }
+
+//go:noinline
+func mul_uint64_21(a uint64) uint64 { return a * 21 }
+
+//go:noinline
+func mul_21_uint64(a uint64) uint64 { return 21 * a }
+
+//go:noinline
+func mul_uint64_25(a uint64) uint64 { return a * 25 }
+
+//go:noinline
+func mul_25_uint64(a uint64) uint64 { return 25 * a }
+
+//go:noinline
+func mul_uint64_27(a uint64) uint64 { return a * 27 }
+
+//go:noinline
+func mul_27_uint64(a uint64) uint64 { return 27 * a }
+
+//go:noinline
+func mul_uint64_37(a uint64) uint64 { return a * 37 }
+
+//go:noinline
+func mul_37_uint64(a uint64) uint64 { return 37 * a }
+
+//go:noinline
+func mul_uint64_41(a uint64) uint64 { return a * 41 }
+
+//go:noinline
+func mul_41_uint64(a uint64) uint64 { return 41 * a }
+
+//go:noinline
+func mul_uint64_45(a uint64) uint64 { return a * 45 }
+
+//go:noinline
+func mul_45_uint64(a uint64) uint64 { return 45 * a }
+
+//go:noinline
+func mul_uint64_73(a uint64) uint64 { return a * 73 }
+
+//go:noinline
+func mul_73_uint64(a uint64) uint64 { return 73 * a }
+
+//go:noinline
+func mul_uint64_81(a uint64) uint64 { return a * 81 }
+
+//go:noinline
+func mul_81_uint64(a uint64) uint64 { return 81 * a }
+
+//go:noinline
+func add_int64_Neg9223372036854775808(a int64) int64 { return a + -9223372036854775808 }
+
+//go:noinline
+func add_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 + a }
+
+//go:noinline
+func add_int64_Neg9223372036854775807(a int64) int64 { return a + -9223372036854775807 }
+
+//go:noinline
+func add_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 + a }
+
+//go:noinline
+func add_int64_Neg4294967296(a int64) int64 { return a + -4294967296 }
+
+//go:noinline
+func add_Neg4294967296_int64(a int64) int64 { return -4294967296 + a }
+
+//go:noinline
+func add_int64_Neg1(a int64) int64 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int64(a int64) int64 { return -1 + a }
+
+//go:noinline
+func add_int64_0(a int64) int64 { return a + 0 }
+
+//go:noinline
+func add_0_int64(a int64) int64 { return 0 + a }
+
+//go:noinline
+func add_int64_1(a int64) int64 { return a + 1 }
+
+//go:noinline
+func add_1_int64(a int64) int64 { return 1 + a }
+
+//go:noinline
+func add_int64_4294967296(a int64) int64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_int64(a int64) int64 { return 4294967296 + a }
+
+//go:noinline
+func add_int64_9223372036854775806(a int64) int64 { return a + 9223372036854775806 }
+
+//go:noinline
+func add_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 + a }
+
+//go:noinline
+func add_int64_9223372036854775807(a int64) int64 { return a + 9223372036854775807 }
+
+//go:noinline
+func add_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 + a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775808(a int64) int64 { return a - -9223372036854775808 }
+
+//go:noinline
+func sub_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 - a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775807(a int64) int64 { return a - -9223372036854775807 }
+
+//go:noinline
+func sub_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 - a }
+
+//go:noinline
+func sub_int64_Neg4294967296(a int64) int64 { return a - -4294967296 }
+
+//go:noinline
+func sub_Neg4294967296_int64(a int64) int64 { return -4294967296 - a }
+
+//go:noinline
+func sub_int64_Neg1(a int64) int64 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int64(a int64) int64 { return -1 - a }
+
+//go:noinline
+func sub_int64_0(a int64) int64 { return a - 0 }
+
+//go:noinline
+func sub_0_int64(a int64) int64 { return 0 - a }
+
+//go:noinline
+func sub_int64_1(a int64) int64 { return a - 1 }
+
+//go:noinline
+func sub_1_int64(a int64) int64 { return 1 - a }
+
+//go:noinline
+func sub_int64_4294967296(a int64) int64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_int64(a int64) int64 { return 4294967296 - a }
+
+//go:noinline
+func sub_int64_9223372036854775806(a int64) int64 { return a - 9223372036854775806 }
+
+//go:noinline
+func sub_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 - a }
+
+//go:noinline
+func sub_int64_9223372036854775807(a int64) int64 { return a - 9223372036854775807 }
+
+//go:noinline
+func sub_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 - a }
+
+//go:noinline
+func div_int64_Neg9223372036854775808(a int64) int64 { return a / -9223372036854775808 }
+
+//go:noinline
+func div_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 / a }
+
+//go:noinline
+func div_int64_Neg9223372036854775807(a int64) int64 { return a / -9223372036854775807 }
+
+//go:noinline
+func div_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 / a }
+
+//go:noinline
+func div_int64_Neg4294967296(a int64) int64 { return a / -4294967296 }
+
+//go:noinline
+func div_Neg4294967296_int64(a int64) int64 { return -4294967296 / a }
+
+//go:noinline
+func div_int64_Neg1(a int64) int64 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int64(a int64) int64 { return -1 / a }
+
+//go:noinline
+func div_0_int64(a int64) int64 { return 0 / a }
+
+//go:noinline
+func div_int64_1(a int64) int64 { return a / 1 }
+
+//go:noinline
+func div_1_int64(a int64) int64 { return 1 / a }
+
+//go:noinline
+func div_int64_4294967296(a int64) int64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_int64(a int64) int64 { return 4294967296 / a }
+
+//go:noinline
+func div_int64_9223372036854775806(a int64) int64 { return a / 9223372036854775806 }
+
+//go:noinline
+func div_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 / a }
+
+//go:noinline
+func div_int64_9223372036854775807(a int64) int64 { return a / 9223372036854775807 }
+
+//go:noinline
+func div_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 / a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775808(a int64) int64 { return a * -9223372036854775808 }
+
+//go:noinline
+func mul_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 * a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775807(a int64) int64 { return a * -9223372036854775807 }
+
+//go:noinline
+func mul_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 * a }
+
+//go:noinline
+func mul_int64_Neg4294967296(a int64) int64 { return a * -4294967296 }
+
+//go:noinline
+func mul_Neg4294967296_int64(a int64) int64 { return -4294967296 * a }
+
+//go:noinline
+func mul_int64_Neg1(a int64) int64 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int64(a int64) int64 { return -1 * a }
+
+//go:noinline
+func mul_int64_0(a int64) int64 { return a * 0 }
+
+//go:noinline
+func mul_0_int64(a int64) int64 { return 0 * a }
+
+//go:noinline
+func mul_int64_1(a int64) int64 { return a * 1 }
+
+//go:noinline
+func mul_1_int64(a int64) int64 { return 1 * a }
+
+//go:noinline
+func mul_int64_4294967296(a int64) int64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_int64(a int64) int64 { return 4294967296 * a }
+
+//go:noinline
+func mul_int64_9223372036854775806(a int64) int64 { return a * 9223372036854775806 }
+
+//go:noinline
+func mul_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 * a }
+
+//go:noinline
+func mul_int64_9223372036854775807(a int64) int64 { return a * 9223372036854775807 }
+
+//go:noinline
+func mul_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 * a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775808(a int64) int64 { return a % -9223372036854775808 }
+
+//go:noinline
+func mod_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 % a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775807(a int64) int64 { return a % -9223372036854775807 }
+
+//go:noinline
+func mod_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 % a }
+
+//go:noinline
+func mod_int64_Neg4294967296(a int64) int64 { return a % -4294967296 }
+
+//go:noinline
+func mod_Neg4294967296_int64(a int64) int64 { return -4294967296 % a }
+
+//go:noinline
+func mod_int64_Neg1(a int64) int64 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int64(a int64) int64 { return -1 % a }
+
+//go:noinline
+func mod_0_int64(a int64) int64 { return 0 % a }
+
+//go:noinline
+func mod_int64_1(a int64) int64 { return a % 1 }
+
+//go:noinline
+func mod_1_int64(a int64) int64 { return 1 % a }
+
+//go:noinline
+func mod_int64_4294967296(a int64) int64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_int64(a int64) int64 { return 4294967296 % a }
+
+//go:noinline
+func mod_int64_9223372036854775806(a int64) int64 { return a % 9223372036854775806 }
+
+//go:noinline
+func mod_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 % a }
+
+//go:noinline
+func mod_int64_9223372036854775807(a int64) int64 { return a % 9223372036854775807 }
+
+//go:noinline
+func mod_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 % a }
+
+//go:noinline
+func and_int64_Neg9223372036854775808(a int64) int64 { return a & -9223372036854775808 }
+
+//go:noinline
+func and_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 & a }
+
+//go:noinline
+func and_int64_Neg9223372036854775807(a int64) int64 { return a & -9223372036854775807 }
+
+//go:noinline
+func and_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 & a }
+
+//go:noinline
+func and_int64_Neg4294967296(a int64) int64 { return a & -4294967296 }
+
+//go:noinline
+func and_Neg4294967296_int64(a int64) int64 { return -4294967296 & a }
+
+//go:noinline
+func and_int64_Neg1(a int64) int64 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int64(a int64) int64 { return -1 & a }
+
+//go:noinline
+func and_int64_0(a int64) int64 { return a & 0 }
+
+//go:noinline
+func and_0_int64(a int64) int64 { return 0 & a }
+
+//go:noinline
+func and_int64_1(a int64) int64 { return a & 1 }
+
+//go:noinline
+func and_1_int64(a int64) int64 { return 1 & a }
+
+//go:noinline
+func and_int64_4294967296(a int64) int64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_int64(a int64) int64 { return 4294967296 & a }
+
+//go:noinline
+func and_int64_9223372036854775806(a int64) int64 { return a & 9223372036854775806 }
+
+//go:noinline
+func and_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 & a }
+
+//go:noinline
+func and_int64_9223372036854775807(a int64) int64 { return a & 9223372036854775807 }
+
+//go:noinline
+func and_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 & a }
+
+//go:noinline
+func or_int64_Neg9223372036854775808(a int64) int64 { return a | -9223372036854775808 }
+
+//go:noinline
+func or_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 | a }
+
+//go:noinline
+func or_int64_Neg9223372036854775807(a int64) int64 { return a | -9223372036854775807 }
+
+//go:noinline
+func or_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 | a }
+
+//go:noinline
+func or_int64_Neg4294967296(a int64) int64 { return a | -4294967296 }
+
+//go:noinline
+func or_Neg4294967296_int64(a int64) int64 { return -4294967296 | a }
+
+//go:noinline
+func or_int64_Neg1(a int64) int64 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int64(a int64) int64 { return -1 | a }
+
+//go:noinline
+func or_int64_0(a int64) int64 { return a | 0 }
+
+//go:noinline
+func or_0_int64(a int64) int64 { return 0 | a }
+
+//go:noinline
+func or_int64_1(a int64) int64 { return a | 1 }
+
+//go:noinline
+func or_1_int64(a int64) int64 { return 1 | a }
+
+//go:noinline
+func or_int64_4294967296(a int64) int64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_int64(a int64) int64 { return 4294967296 | a }
+
+//go:noinline
+func or_int64_9223372036854775806(a int64) int64 { return a | 9223372036854775806 }
+
+//go:noinline
+func or_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 | a }
+
+//go:noinline
+func or_int64_9223372036854775807(a int64) int64 { return a | 9223372036854775807 }
+
+//go:noinline
+func or_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 | a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775808(a int64) int64 { return a ^ -9223372036854775808 }
+
+//go:noinline
+func xor_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 ^ a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775807(a int64) int64 { return a ^ -9223372036854775807 }
+
+//go:noinline
+func xor_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 ^ a }
+
+//go:noinline
+func xor_int64_Neg4294967296(a int64) int64 { return a ^ -4294967296 }
+
+//go:noinline
+func xor_Neg4294967296_int64(a int64) int64 { return -4294967296 ^ a }
+
+//go:noinline
+func xor_int64_Neg1(a int64) int64 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int64(a int64) int64 { return -1 ^ a }
+
+//go:noinline
+func xor_int64_0(a int64) int64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int64(a int64) int64 { return 0 ^ a }
+
+//go:noinline
+func xor_int64_1(a int64) int64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int64(a int64) int64 { return 1 ^ a }
+
+//go:noinline
+func xor_int64_4294967296(a int64) int64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_int64(a int64) int64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775806(a int64) int64 { return a ^ 9223372036854775806 }
+
+//go:noinline
+func xor_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775807(a int64) int64 { return a ^ 9223372036854775807 }
+
+//go:noinline
+func xor_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 ^ a }
+
+//go:noinline
+func mul_int64_Neg9(a int64) int64 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int64(a int64) int64 { return -9 * a }
+
+//go:noinline
+func mul_int64_Neg5(a int64) int64 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int64(a int64) int64 { return -5 * a }
+
+//go:noinline
+func mul_int64_Neg3(a int64) int64 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int64(a int64) int64 { return -3 * a }
+
+//go:noinline
+func mul_int64_3(a int64) int64 { return a * 3 }
+
+//go:noinline
+func mul_3_int64(a int64) int64 { return 3 * a }
+
+//go:noinline
+func mul_int64_5(a int64) int64 { return a * 5 }
+
+//go:noinline
+func mul_5_int64(a int64) int64 { return 5 * a }
+
+//go:noinline
+func mul_int64_7(a int64) int64 { return a * 7 }
+
+//go:noinline
+func mul_7_int64(a int64) int64 { return 7 * a }
+
+//go:noinline
+func mul_int64_9(a int64) int64 { return a * 9 }
+
+//go:noinline
+func mul_9_int64(a int64) int64 { return 9 * a }
+
+//go:noinline
+func mul_int64_10(a int64) int64 { return a * 10 }
+
+//go:noinline
+func mul_10_int64(a int64) int64 { return 10 * a }
+
+//go:noinline
+func mul_int64_11(a int64) int64 { return a * 11 }
+
+//go:noinline
+func mul_11_int64(a int64) int64 { return 11 * a }
+
+//go:noinline
+func mul_int64_13(a int64) int64 { return a * 13 }
+
+//go:noinline
+func mul_13_int64(a int64) int64 { return 13 * a }
+
+//go:noinline
+func mul_int64_19(a int64) int64 { return a * 19 }
+
+//go:noinline
+func mul_19_int64(a int64) int64 { return 19 * a }
+
+//go:noinline
+func mul_int64_21(a int64) int64 { return a * 21 }
+
+//go:noinline
+func mul_21_int64(a int64) int64 { return 21 * a }
+
+//go:noinline
+func mul_int64_25(a int64) int64 { return a * 25 }
+
+//go:noinline
+func mul_25_int64(a int64) int64 { return 25 * a }
+
+//go:noinline
+func mul_int64_27(a int64) int64 { return a * 27 }
+
+//go:noinline
+func mul_27_int64(a int64) int64 { return 27 * a }
+
+//go:noinline
+func mul_int64_37(a int64) int64 { return a * 37 }
+
+//go:noinline
+func mul_37_int64(a int64) int64 { return 37 * a }
+
+//go:noinline
+func mul_int64_41(a int64) int64 { return a * 41 }
+
+//go:noinline
+func mul_41_int64(a int64) int64 { return 41 * a }
+
+//go:noinline
+func mul_int64_45(a int64) int64 { return a * 45 }
+
+//go:noinline
+func mul_45_int64(a int64) int64 { return 45 * a }
+
+//go:noinline
+func mul_int64_73(a int64) int64 { return a * 73 }
+
+//go:noinline
+func mul_73_int64(a int64) int64 { return 73 * a }
+
+//go:noinline
+func mul_int64_81(a int64) int64 { return a * 81 }
+
+//go:noinline
+func mul_81_int64(a int64) int64 { return 81 * a }
+
+//go:noinline
+func add_uint32_0(a uint32) uint32 { return a + 0 }
+
+//go:noinline
+func add_0_uint32(a uint32) uint32 { return 0 + a }
+
+//go:noinline
+func add_uint32_1(a uint32) uint32 { return a + 1 }
+
+//go:noinline
+func add_1_uint32(a uint32) uint32 { return 1 + a }
+
+//go:noinline
+func add_uint32_4294967295(a uint32) uint32 { return a + 4294967295 }
+
+//go:noinline
+func add_4294967295_uint32(a uint32) uint32 { return 4294967295 + a }
+
+//go:noinline
+func sub_uint32_0(a uint32) uint32 { return a - 0 }
+
+//go:noinline
+func sub_0_uint32(a uint32) uint32 { return 0 - a }
+
+//go:noinline
+func sub_uint32_1(a uint32) uint32 { return a - 1 }
+
+//go:noinline
+func sub_1_uint32(a uint32) uint32 { return 1 - a }
+
+//go:noinline
+func sub_uint32_4294967295(a uint32) uint32 { return a - 4294967295 }
+
+//go:noinline
+func sub_4294967295_uint32(a uint32) uint32 { return 4294967295 - a }
+
+//go:noinline
+func div_0_uint32(a uint32) uint32 { return 0 / a }
+
+//go:noinline
+func div_uint32_1(a uint32) uint32 { return a / 1 }
+
+//go:noinline
+func div_1_uint32(a uint32) uint32 { return 1 / a }
+
+//go:noinline
+func div_uint32_4294967295(a uint32) uint32 { return a / 4294967295 }
+
+//go:noinline
+func div_4294967295_uint32(a uint32) uint32 { return 4294967295 / a }
+
+//go:noinline
+func mul_uint32_0(a uint32) uint32 { return a * 0 }
+
+//go:noinline
+func mul_0_uint32(a uint32) uint32 { return 0 * a }
+
+//go:noinline
+func mul_uint32_1(a uint32) uint32 { return a * 1 }
+
+//go:noinline
+func mul_1_uint32(a uint32) uint32 { return 1 * a }
+
+//go:noinline
+func mul_uint32_4294967295(a uint32) uint32 { return a * 4294967295 }
+
+//go:noinline
+func mul_4294967295_uint32(a uint32) uint32 { return 4294967295 * a }
+
+//go:noinline
+func lsh_uint32_0(a uint32) uint32 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint32(a uint32) uint32 { return 0 << a }
+
+//go:noinline
+func lsh_uint32_1(a uint32) uint32 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint32(a uint32) uint32 { return 1 << a }
+
+//go:noinline
+func lsh_uint32_4294967295(a uint32) uint32 { return a << 4294967295 }
+
+//go:noinline
+func lsh_4294967295_uint32(a uint32) uint32 { return 4294967295 << a }
+
+//go:noinline
+func rsh_uint32_0(a uint32) uint32 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint32(a uint32) uint32 { return 0 >> a }
+
+//go:noinline
+func rsh_uint32_1(a uint32) uint32 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint32(a uint32) uint32 { return 1 >> a }
+
+//go:noinline
+func rsh_uint32_4294967295(a uint32) uint32 { return a >> 4294967295 }
+
+//go:noinline
+func rsh_4294967295_uint32(a uint32) uint32 { return 4294967295 >> a }
+
+//go:noinline
+func mod_0_uint32(a uint32) uint32 { return 0 % a }
+
+//go:noinline
+func mod_uint32_1(a uint32) uint32 { return a % 1 }
+
+//go:noinline
+func mod_1_uint32(a uint32) uint32 { return 1 % a }
+
+//go:noinline
+func mod_uint32_4294967295(a uint32) uint32 { return a % 4294967295 }
+
+//go:noinline
+func mod_4294967295_uint32(a uint32) uint32 { return 4294967295 % a }
+
+//go:noinline
+func and_uint32_0(a uint32) uint32 { return a & 0 }
+
+//go:noinline
+func and_0_uint32(a uint32) uint32 { return 0 & a }
+
+//go:noinline
+func and_uint32_1(a uint32) uint32 { return a & 1 }
+
+//go:noinline
+func and_1_uint32(a uint32) uint32 { return 1 & a }
+
+//go:noinline
+func and_uint32_4294967295(a uint32) uint32 { return a & 4294967295 }
+
+//go:noinline
+func and_4294967295_uint32(a uint32) uint32 { return 4294967295 & a }
+
+//go:noinline
+func or_uint32_0(a uint32) uint32 { return a | 0 }
+
+//go:noinline
+func or_0_uint32(a uint32) uint32 { return 0 | a }
+
+//go:noinline
+func or_uint32_1(a uint32) uint32 { return a | 1 }
+
+//go:noinline
+func or_1_uint32(a uint32) uint32 { return 1 | a }
+
+//go:noinline
+func or_uint32_4294967295(a uint32) uint32 { return a | 4294967295 }
+
+//go:noinline
+func or_4294967295_uint32(a uint32) uint32 { return 4294967295 | a }
+
+//go:noinline
+func xor_uint32_0(a uint32) uint32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint32(a uint32) uint32 { return 0 ^ a }
+
+//go:noinline
+func xor_uint32_1(a uint32) uint32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint32(a uint32) uint32 { return 1 ^ a }
+
+//go:noinline
+func xor_uint32_4294967295(a uint32) uint32 { return a ^ 4294967295 }
+
+//go:noinline
+func xor_4294967295_uint32(a uint32) uint32 { return 4294967295 ^ a }
+
+//go:noinline
+func mul_uint32_3(a uint32) uint32 { return a * 3 }
+
+//go:noinline
+func mul_3_uint32(a uint32) uint32 { return 3 * a }
+
+//go:noinline
+func mul_uint32_5(a uint32) uint32 { return a * 5 }
+
+//go:noinline
+func mul_5_uint32(a uint32) uint32 { return 5 * a }
+
+//go:noinline
+func mul_uint32_7(a uint32) uint32 { return a * 7 }
+
+//go:noinline
+func mul_7_uint32(a uint32) uint32 { return 7 * a }
+
+//go:noinline
+func mul_uint32_9(a uint32) uint32 { return a * 9 }
+
+//go:noinline
+func mul_9_uint32(a uint32) uint32 { return 9 * a }
+
+//go:noinline
+func mul_uint32_10(a uint32) uint32 { return a * 10 }
+
+//go:noinline
+func mul_10_uint32(a uint32) uint32 { return 10 * a }
+
+//go:noinline
+func mul_uint32_11(a uint32) uint32 { return a * 11 }
+
+//go:noinline
+func mul_11_uint32(a uint32) uint32 { return 11 * a }
+
+//go:noinline
+func mul_uint32_13(a uint32) uint32 { return a * 13 }
+
+//go:noinline
+func mul_13_uint32(a uint32) uint32 { return 13 * a }
+
+//go:noinline
+func mul_uint32_19(a uint32) uint32 { return a * 19 }
+
+//go:noinline
+func mul_19_uint32(a uint32) uint32 { return 19 * a }
+
+//go:noinline
+func mul_uint32_21(a uint32) uint32 { return a * 21 }
+
+//go:noinline
+func mul_21_uint32(a uint32) uint32 { return 21 * a }
+
+//go:noinline
+func mul_uint32_25(a uint32) uint32 { return a * 25 }
+
+//go:noinline
+func mul_25_uint32(a uint32) uint32 { return 25 * a }
+
+//go:noinline
+func mul_uint32_27(a uint32) uint32 { return a * 27 }
+
+//go:noinline
+func mul_27_uint32(a uint32) uint32 { return 27 * a }
+
+//go:noinline
+func mul_uint32_37(a uint32) uint32 { return a * 37 }
+
+//go:noinline
+func mul_37_uint32(a uint32) uint32 { return 37 * a }
+
+//go:noinline
+func mul_uint32_41(a uint32) uint32 { return a * 41 }
+
+//go:noinline
+func mul_41_uint32(a uint32) uint32 { return 41 * a }
+
+//go:noinline
+func mul_uint32_45(a uint32) uint32 { return a * 45 }
+
+//go:noinline
+func mul_45_uint32(a uint32) uint32 { return 45 * a }
+
+//go:noinline
+func mul_uint32_73(a uint32) uint32 { return a * 73 }
+
+//go:noinline
+func mul_73_uint32(a uint32) uint32 { return 73 * a }
+
+//go:noinline
+func mul_uint32_81(a uint32) uint32 { return a * 81 }
+
+//go:noinline
+func mul_81_uint32(a uint32) uint32 { return 81 * a }
+
+//go:noinline
+func add_int32_Neg2147483648(a int32) int32 { return a + -2147483648 }
+
+//go:noinline
+func add_Neg2147483648_int32(a int32) int32 { return -2147483648 + a }
+
+//go:noinline
+func add_int32_Neg2147483647(a int32) int32 { return a + -2147483647 }
+
+//go:noinline
+func add_Neg2147483647_int32(a int32) int32 { return -2147483647 + a }
+
+//go:noinline
+func add_int32_Neg1(a int32) int32 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int32(a int32) int32 { return -1 + a }
+
+//go:noinline
+func add_int32_0(a int32) int32 { return a + 0 }
+
+//go:noinline
+func add_0_int32(a int32) int32 { return 0 + a }
+
+//go:noinline
+func add_int32_1(a int32) int32 { return a + 1 }
+
+//go:noinline
+func add_1_int32(a int32) int32 { return 1 + a }
+
+//go:noinline
+func add_int32_2147483647(a int32) int32 { return a + 2147483647 }
+
+//go:noinline
+func add_2147483647_int32(a int32) int32 { return 2147483647 + a }
+
+//go:noinline
+func sub_int32_Neg2147483648(a int32) int32 { return a - -2147483648 }
+
+//go:noinline
+func sub_Neg2147483648_int32(a int32) int32 { return -2147483648 - a }
+
+//go:noinline
+func sub_int32_Neg2147483647(a int32) int32 { return a - -2147483647 }
+
+//go:noinline
+func sub_Neg2147483647_int32(a int32) int32 { return -2147483647 - a }
+
+//go:noinline
+func sub_int32_Neg1(a int32) int32 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int32(a int32) int32 { return -1 - a }
+
+//go:noinline
+func sub_int32_0(a int32) int32 { return a - 0 }
+
+//go:noinline
+func sub_0_int32(a int32) int32 { return 0 - a }
+
+//go:noinline
+func sub_int32_1(a int32) int32 { return a - 1 }
+
+//go:noinline
+func sub_1_int32(a int32) int32 { return 1 - a }
+
+//go:noinline
+func sub_int32_2147483647(a int32) int32 { return a - 2147483647 }
+
+//go:noinline
+func sub_2147483647_int32(a int32) int32 { return 2147483647 - a }
+
+//go:noinline
+func div_int32_Neg2147483648(a int32) int32 { return a / -2147483648 }
+
+//go:noinline
+func div_Neg2147483648_int32(a int32) int32 { return -2147483648 / a }
+
+//go:noinline
+func div_int32_Neg2147483647(a int32) int32 { return a / -2147483647 }
+
+//go:noinline
+func div_Neg2147483647_int32(a int32) int32 { return -2147483647 / a }
+
+//go:noinline
+func div_int32_Neg1(a int32) int32 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int32(a int32) int32 { return -1 / a }
+
+//go:noinline
+func div_0_int32(a int32) int32 { return 0 / a }
+
+//go:noinline
+func div_int32_1(a int32) int32 { return a / 1 }
+
+//go:noinline
+func div_1_int32(a int32) int32 { return 1 / a }
+
+//go:noinline
+func div_int32_2147483647(a int32) int32 { return a / 2147483647 }
+
+//go:noinline
+func div_2147483647_int32(a int32) int32 { return 2147483647 / a }
+
+//go:noinline
+func mul_int32_Neg2147483648(a int32) int32 { return a * -2147483648 }
+
+//go:noinline
+func mul_Neg2147483648_int32(a int32) int32 { return -2147483648 * a }
+
+//go:noinline
+func mul_int32_Neg2147483647(a int32) int32 { return a * -2147483647 }
+
+//go:noinline
+func mul_Neg2147483647_int32(a int32) int32 { return -2147483647 * a }
+
+//go:noinline
+func mul_int32_Neg1(a int32) int32 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int32(a int32) int32 { return -1 * a }
+
+//go:noinline
+func mul_int32_0(a int32) int32 { return a * 0 }
+
+//go:noinline
+func mul_0_int32(a int32) int32 { return 0 * a }
+
+//go:noinline
+func mul_int32_1(a int32) int32 { return a * 1 }
+
+//go:noinline
+func mul_1_int32(a int32) int32 { return 1 * a }
+
+//go:noinline
+func mul_int32_2147483647(a int32) int32 { return a * 2147483647 }
+
+//go:noinline
+func mul_2147483647_int32(a int32) int32 { return 2147483647 * a }
+
+//go:noinline
+func mod_int32_Neg2147483648(a int32) int32 { return a % -2147483648 }
+
+//go:noinline
+func mod_Neg2147483648_int32(a int32) int32 { return -2147483648 % a }
+
+//go:noinline
+func mod_int32_Neg2147483647(a int32) int32 { return a % -2147483647 }
+
+//go:noinline
+func mod_Neg2147483647_int32(a int32) int32 { return -2147483647 % a }
+
+//go:noinline
+func mod_int32_Neg1(a int32) int32 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int32(a int32) int32 { return -1 % a }
+
+//go:noinline
+func mod_0_int32(a int32) int32 { return 0 % a }
+
+//go:noinline
+func mod_int32_1(a int32) int32 { return a % 1 }
+
+//go:noinline
+func mod_1_int32(a int32) int32 { return 1 % a }
+
+//go:noinline
+func mod_int32_2147483647(a int32) int32 { return a % 2147483647 }
+
+//go:noinline
+func mod_2147483647_int32(a int32) int32 { return 2147483647 % a }
+
+//go:noinline
+func and_int32_Neg2147483648(a int32) int32 { return a & -2147483648 }
+
+//go:noinline
+func and_Neg2147483648_int32(a int32) int32 { return -2147483648 & a }
+
+//go:noinline
+func and_int32_Neg2147483647(a int32) int32 { return a & -2147483647 }
+
+//go:noinline
+func and_Neg2147483647_int32(a int32) int32 { return -2147483647 & a }
+
+//go:noinline
+func and_int32_Neg1(a int32) int32 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int32(a int32) int32 { return -1 & a }
+
+//go:noinline
+func and_int32_0(a int32) int32 { return a & 0 }
+
+//go:noinline
+func and_0_int32(a int32) int32 { return 0 & a }
+
+//go:noinline
+func and_int32_1(a int32) int32 { return a & 1 }
+
+//go:noinline
+func and_1_int32(a int32) int32 { return 1 & a }
+
+//go:noinline
+func and_int32_2147483647(a int32) int32 { return a & 2147483647 }
+
+//go:noinline
+func and_2147483647_int32(a int32) int32 { return 2147483647 & a }
+
+//go:noinline
+func or_int32_Neg2147483648(a int32) int32 { return a | -2147483648 }
+
+//go:noinline
+func or_Neg2147483648_int32(a int32) int32 { return -2147483648 | a }
+
+//go:noinline
+func or_int32_Neg2147483647(a int32) int32 { return a | -2147483647 }
+
+//go:noinline
+func or_Neg2147483647_int32(a int32) int32 { return -2147483647 | a }
+
+//go:noinline
+func or_int32_Neg1(a int32) int32 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int32(a int32) int32 { return -1 | a }
+
+//go:noinline
+func or_int32_0(a int32) int32 { return a | 0 }
+
+//go:noinline
+func or_0_int32(a int32) int32 { return 0 | a }
+
+//go:noinline
+func or_int32_1(a int32) int32 { return a | 1 }
+
+//go:noinline
+func or_1_int32(a int32) int32 { return 1 | a }
+
+//go:noinline
+func or_int32_2147483647(a int32) int32 { return a | 2147483647 }
+
+//go:noinline
+func or_2147483647_int32(a int32) int32 { return 2147483647 | a }
+
+//go:noinline
+func xor_int32_Neg2147483648(a int32) int32 { return a ^ -2147483648 }
+
+//go:noinline
+func xor_Neg2147483648_int32(a int32) int32 { return -2147483648 ^ a }
+
+//go:noinline
+func xor_int32_Neg2147483647(a int32) int32 { return a ^ -2147483647 }
+
+//go:noinline
+func xor_Neg2147483647_int32(a int32) int32 { return -2147483647 ^ a }
+
+//go:noinline
+func xor_int32_Neg1(a int32) int32 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int32(a int32) int32 { return -1 ^ a }
+
+//go:noinline
+func xor_int32_0(a int32) int32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int32(a int32) int32 { return 0 ^ a }
+
+//go:noinline
+func xor_int32_1(a int32) int32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int32(a int32) int32 { return 1 ^ a }
+
+//go:noinline
+func xor_int32_2147483647(a int32) int32 { return a ^ 2147483647 }
+
+//go:noinline
+func xor_2147483647_int32(a int32) int32 { return 2147483647 ^ a }
+
+//go:noinline
+func mul_int32_Neg9(a int32) int32 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int32(a int32) int32 { return -9 * a }
+
+//go:noinline
+func mul_int32_Neg5(a int32) int32 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int32(a int32) int32 { return -5 * a }
+
+//go:noinline
+func mul_int32_Neg3(a int32) int32 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int32(a int32) int32 { return -3 * a }
+
+//go:noinline
+func mul_int32_3(a int32) int32 { return a * 3 }
+
+//go:noinline
+func mul_3_int32(a int32) int32 { return 3 * a }
+
+//go:noinline
+func mul_int32_5(a int32) int32 { return a * 5 }
+
+//go:noinline
+func mul_5_int32(a int32) int32 { return 5 * a }
+
+//go:noinline
+func mul_int32_7(a int32) int32 { return a * 7 }
+
+//go:noinline
+func mul_7_int32(a int32) int32 { return 7 * a }
+
+//go:noinline
+func mul_int32_9(a int32) int32 { return a * 9 }
+
+//go:noinline
+func mul_9_int32(a int32) int32 { return 9 * a }
+
+//go:noinline
+func mul_int32_10(a int32) int32 { return a * 10 }
+
+//go:noinline
+func mul_10_int32(a int32) int32 { return 10 * a }
+
+//go:noinline
+func mul_int32_11(a int32) int32 { return a * 11 }
+
+//go:noinline
+func mul_11_int32(a int32) int32 { return 11 * a }
+
+//go:noinline
+func mul_int32_13(a int32) int32 { return a * 13 }
+
+//go:noinline
+func mul_13_int32(a int32) int32 { return 13 * a }
+
+//go:noinline
+func mul_int32_19(a int32) int32 { return a * 19 }
+
+//go:noinline
+func mul_19_int32(a int32) int32 { return 19 * a }
+
+//go:noinline
+func mul_int32_21(a int32) int32 { return a * 21 }
+
+//go:noinline
+func mul_21_int32(a int32) int32 { return 21 * a }
+
+//go:noinline
+func mul_int32_25(a int32) int32 { return a * 25 }
+
+//go:noinline
+func mul_25_int32(a int32) int32 { return 25 * a }
+
+//go:noinline
+func mul_int32_27(a int32) int32 { return a * 27 }
+
+//go:noinline
+func mul_27_int32(a int32) int32 { return 27 * a }
+
+//go:noinline
+func mul_int32_37(a int32) int32 { return a * 37 }
+
+//go:noinline
+func mul_37_int32(a int32) int32 { return 37 * a }
+
+//go:noinline
+func mul_int32_41(a int32) int32 { return a * 41 }
+
+//go:noinline
+func mul_41_int32(a int32) int32 { return 41 * a }
+
+//go:noinline
+func mul_int32_45(a int32) int32 { return a * 45 }
+
+//go:noinline
+func mul_45_int32(a int32) int32 { return 45 * a }
+
+//go:noinline
+func mul_int32_73(a int32) int32 { return a * 73 }
+
+//go:noinline
+func mul_73_int32(a int32) int32 { return 73 * a }
+
+//go:noinline
+func mul_int32_81(a int32) int32 { return a * 81 }
+
+//go:noinline
+func mul_81_int32(a int32) int32 { return 81 * a }
+
+//go:noinline
+func add_uint16_0(a uint16) uint16 { return a + 0 }
+
+//go:noinline
+func add_0_uint16(a uint16) uint16 { return 0 + a }
+
+//go:noinline
+func add_uint16_1(a uint16) uint16 { return a + 1 }
+
+//go:noinline
+func add_1_uint16(a uint16) uint16 { return 1 + a }
+
+//go:noinline
+func add_uint16_65535(a uint16) uint16 { return a + 65535 }
+
+//go:noinline
+func add_65535_uint16(a uint16) uint16 { return 65535 + a }
+
+//go:noinline
+func sub_uint16_0(a uint16) uint16 { return a - 0 }
+
+//go:noinline
+func sub_0_uint16(a uint16) uint16 { return 0 - a }
+
+//go:noinline
+func sub_uint16_1(a uint16) uint16 { return a - 1 }
+
+//go:noinline
+func sub_1_uint16(a uint16) uint16 { return 1 - a }
+
+//go:noinline
+func sub_uint16_65535(a uint16) uint16 { return a - 65535 }
+
+//go:noinline
+func sub_65535_uint16(a uint16) uint16 { return 65535 - a }
+
+//go:noinline
+func div_0_uint16(a uint16) uint16 { return 0 / a }
+
+//go:noinline
+func div_uint16_1(a uint16) uint16 { return a / 1 }
+
+//go:noinline
+func div_1_uint16(a uint16) uint16 { return 1 / a }
+
+//go:noinline
+func div_uint16_65535(a uint16) uint16 { return a / 65535 }
+
+//go:noinline
+func div_65535_uint16(a uint16) uint16 { return 65535 / a }
+
+//go:noinline
+func mul_uint16_0(a uint16) uint16 { return a * 0 }
+
+//go:noinline
+func mul_0_uint16(a uint16) uint16 { return 0 * a }
+
+//go:noinline
+func mul_uint16_1(a uint16) uint16 { return a * 1 }
+
+//go:noinline
+func mul_1_uint16(a uint16) uint16 { return 1 * a }
+
+//go:noinline
+func mul_uint16_65535(a uint16) uint16 { return a * 65535 }
+
+//go:noinline
+func mul_65535_uint16(a uint16) uint16 { return 65535 * a }
+
+//go:noinline
+func lsh_uint16_0(a uint16) uint16 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint16(a uint16) uint16 { return 0 << a }
+
+//go:noinline
+func lsh_uint16_1(a uint16) uint16 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint16(a uint16) uint16 { return 1 << a }
+
+//go:noinline
+func lsh_uint16_65535(a uint16) uint16 { return a << 65535 }
+
+//go:noinline
+func lsh_65535_uint16(a uint16) uint16 { return 65535 << a }
+
+//go:noinline
+func rsh_uint16_0(a uint16) uint16 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint16(a uint16) uint16 { return 0 >> a }
+
+//go:noinline
+func rsh_uint16_1(a uint16) uint16 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint16(a uint16) uint16 { return 1 >> a }
+
+//go:noinline
+func rsh_uint16_65535(a uint16) uint16 { return a >> 65535 }
+
+//go:noinline
+func rsh_65535_uint16(a uint16) uint16 { return 65535 >> a }
+
+//go:noinline
+func mod_0_uint16(a uint16) uint16 { return 0 % a }
+
+//go:noinline
+func mod_uint16_1(a uint16) uint16 { return a % 1 }
+
+//go:noinline
+func mod_1_uint16(a uint16) uint16 { return 1 % a }
+
+//go:noinline
+func mod_uint16_65535(a uint16) uint16 { return a % 65535 }
+
+//go:noinline
+func mod_65535_uint16(a uint16) uint16 { return 65535 % a }
+
+//go:noinline
+func and_uint16_0(a uint16) uint16 { return a & 0 }
+
+//go:noinline
+func and_0_uint16(a uint16) uint16 { return 0 & a }
+
+//go:noinline
+func and_uint16_1(a uint16) uint16 { return a & 1 }
+
+//go:noinline
+func and_1_uint16(a uint16) uint16 { return 1 & a }
+
+//go:noinline
+func and_uint16_65535(a uint16) uint16 { return a & 65535 }
+
+//go:noinline
+func and_65535_uint16(a uint16) uint16 { return 65535 & a }
+
+//go:noinline
+func or_uint16_0(a uint16) uint16 { return a | 0 }
+
+//go:noinline
+func or_0_uint16(a uint16) uint16 { return 0 | a }
+
+//go:noinline
+func or_uint16_1(a uint16) uint16 { return a | 1 }
+
+//go:noinline
+func or_1_uint16(a uint16) uint16 { return 1 | a }
+
+//go:noinline
+func or_uint16_65535(a uint16) uint16 { return a | 65535 }
+
+//go:noinline
+func or_65535_uint16(a uint16) uint16 { return 65535 | a }
+
+//go:noinline
+func xor_uint16_0(a uint16) uint16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint16(a uint16) uint16 { return 0 ^ a }
+
+//go:noinline
+func xor_uint16_1(a uint16) uint16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint16(a uint16) uint16 { return 1 ^ a }
+
+//go:noinline
+func xor_uint16_65535(a uint16) uint16 { return a ^ 65535 }
+
+//go:noinline
+func xor_65535_uint16(a uint16) uint16 { return 65535 ^ a }
+
+//go:noinline
+func add_int16_Neg32768(a int16) int16 { return a + -32768 }
+
+//go:noinline
+func add_Neg32768_int16(a int16) int16 { return -32768 + a }
+
+//go:noinline
+func add_int16_Neg32767(a int16) int16 { return a + -32767 }
+
+//go:noinline
+func add_Neg32767_int16(a int16) int16 { return -32767 + a }
+
+//go:noinline
+func add_int16_Neg1(a int16) int16 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int16(a int16) int16 { return -1 + a }
+
+//go:noinline
+func add_int16_0(a int16) int16 { return a + 0 }
+
+//go:noinline
+func add_0_int16(a int16) int16 { return 0 + a }
+
+//go:noinline
+func add_int16_1(a int16) int16 { return a + 1 }
+
+//go:noinline
+func add_1_int16(a int16) int16 { return 1 + a }
+
+//go:noinline
+func add_int16_32766(a int16) int16 { return a + 32766 }
+
+//go:noinline
+func add_32766_int16(a int16) int16 { return 32766 + a }
+
+//go:noinline
+func add_int16_32767(a int16) int16 { return a + 32767 }
+
+//go:noinline
+func add_32767_int16(a int16) int16 { return 32767 + a }
+
+//go:noinline
+func sub_int16_Neg32768(a int16) int16 { return a - -32768 }
+
+//go:noinline
+func sub_Neg32768_int16(a int16) int16 { return -32768 - a }
+
+//go:noinline
+func sub_int16_Neg32767(a int16) int16 { return a - -32767 }
+
+//go:noinline
+func sub_Neg32767_int16(a int16) int16 { return -32767 - a }
+
+//go:noinline
+func sub_int16_Neg1(a int16) int16 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int16(a int16) int16 { return -1 - a }
+
+//go:noinline
+func sub_int16_0(a int16) int16 { return a - 0 }
+
+//go:noinline
+func sub_0_int16(a int16) int16 { return 0 - a }
+
+//go:noinline
+func sub_int16_1(a int16) int16 { return a - 1 }
+
+//go:noinline
+func sub_1_int16(a int16) int16 { return 1 - a }
+
+//go:noinline
+func sub_int16_32766(a int16) int16 { return a - 32766 }
+
+//go:noinline
+func sub_32766_int16(a int16) int16 { return 32766 - a }
+
+//go:noinline
+func sub_int16_32767(a int16) int16 { return a - 32767 }
+
+//go:noinline
+func sub_32767_int16(a int16) int16 { return 32767 - a }
+
+//go:noinline
+func div_int16_Neg32768(a int16) int16 { return a / -32768 }
+
+//go:noinline
+func div_Neg32768_int16(a int16) int16 { return -32768 / a }
+
+//go:noinline
+func div_int16_Neg32767(a int16) int16 { return a / -32767 }
+
+//go:noinline
+func div_Neg32767_int16(a int16) int16 { return -32767 / a }
+
+//go:noinline
+func div_int16_Neg1(a int16) int16 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int16(a int16) int16 { return -1 / a }
+
+//go:noinline
+func div_0_int16(a int16) int16 { return 0 / a }
+
+//go:noinline
+func div_int16_1(a int16) int16 { return a / 1 }
+
+//go:noinline
+func div_1_int16(a int16) int16 { return 1 / a }
+
+//go:noinline
+func div_int16_32766(a int16) int16 { return a / 32766 }
+
+//go:noinline
+func div_32766_int16(a int16) int16 { return 32766 / a }
+
+//go:noinline
+func div_int16_32767(a int16) int16 { return a / 32767 }
+
+//go:noinline
+func div_32767_int16(a int16) int16 { return 32767 / a }
+
+//go:noinline
+func mul_int16_Neg32768(a int16) int16 { return a * -32768 }
+
+//go:noinline
+func mul_Neg32768_int16(a int16) int16 { return -32768 * a }
+
+//go:noinline
+func mul_int16_Neg32767(a int16) int16 { return a * -32767 }
+
+//go:noinline
+func mul_Neg32767_int16(a int16) int16 { return -32767 * a }
+
+//go:noinline
+func mul_int16_Neg1(a int16) int16 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int16(a int16) int16 { return -1 * a }
+
+//go:noinline
+func mul_int16_0(a int16) int16 { return a * 0 }
+
+//go:noinline
+func mul_0_int16(a int16) int16 { return 0 * a }
+
+//go:noinline
+func mul_int16_1(a int16) int16 { return a * 1 }
+
+//go:noinline
+func mul_1_int16(a int16) int16 { return 1 * a }
+
+//go:noinline
+func mul_int16_32766(a int16) int16 { return a * 32766 }
+
+//go:noinline
+func mul_32766_int16(a int16) int16 { return 32766 * a }
+
+//go:noinline
+func mul_int16_32767(a int16) int16 { return a * 32767 }
+
+//go:noinline
+func mul_32767_int16(a int16) int16 { return 32767 * a }
+
+//go:noinline
+func mod_int16_Neg32768(a int16) int16 { return a % -32768 }
+
+//go:noinline
+func mod_Neg32768_int16(a int16) int16 { return -32768 % a }
+
+//go:noinline
+func mod_int16_Neg32767(a int16) int16 { return a % -32767 }
+
+//go:noinline
+func mod_Neg32767_int16(a int16) int16 { return -32767 % a }
+
+//go:noinline
+func mod_int16_Neg1(a int16) int16 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int16(a int16) int16 { return -1 % a }
+
+//go:noinline
+func mod_0_int16(a int16) int16 { return 0 % a }
+
+//go:noinline
+func mod_int16_1(a int16) int16 { return a % 1 }
+
+//go:noinline
+func mod_1_int16(a int16) int16 { return 1 % a }
+
+//go:noinline
+func mod_int16_32766(a int16) int16 { return a % 32766 }
+
+//go:noinline
+func mod_32766_int16(a int16) int16 { return 32766 % a }
+
+//go:noinline
+func mod_int16_32767(a int16) int16 { return a % 32767 }
+
+//go:noinline
+func mod_32767_int16(a int16) int16 { return 32767 % a }
+
+//go:noinline
+func and_int16_Neg32768(a int16) int16 { return a & -32768 }
+
+//go:noinline
+func and_Neg32768_int16(a int16) int16 { return -32768 & a }
+
+//go:noinline
+func and_int16_Neg32767(a int16) int16 { return a & -32767 }
+
+//go:noinline
+func and_Neg32767_int16(a int16) int16 { return -32767 & a }
+
+//go:noinline
+func and_int16_Neg1(a int16) int16 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int16(a int16) int16 { return -1 & a }
+
+//go:noinline
+func and_int16_0(a int16) int16 { return a & 0 }
+
+//go:noinline
+func and_0_int16(a int16) int16 { return 0 & a }
+
+//go:noinline
+func and_int16_1(a int16) int16 { return a & 1 }
+
+//go:noinline
+func and_1_int16(a int16) int16 { return 1 & a }
+
+//go:noinline
+func and_int16_32766(a int16) int16 { return a & 32766 }
+
+//go:noinline
+func and_32766_int16(a int16) int16 { return 32766 & a }
+
+//go:noinline
+func and_int16_32767(a int16) int16 { return a & 32767 }
+
+//go:noinline
+func and_32767_int16(a int16) int16 { return 32767 & a }
+
+//go:noinline
+func or_int16_Neg32768(a int16) int16 { return a | -32768 }
+
+//go:noinline
+func or_Neg32768_int16(a int16) int16 { return -32768 | a }
+
+//go:noinline
+func or_int16_Neg32767(a int16) int16 { return a | -32767 }
+
+//go:noinline
+func or_Neg32767_int16(a int16) int16 { return -32767 | a }
+
+//go:noinline
+func or_int16_Neg1(a int16) int16 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int16(a int16) int16 { return -1 | a }
+
+//go:noinline
+func or_int16_0(a int16) int16 { return a | 0 }
+
+//go:noinline
+func or_0_int16(a int16) int16 { return 0 | a }
+
+//go:noinline
+func or_int16_1(a int16) int16 { return a | 1 }
+
+//go:noinline
+func or_1_int16(a int16) int16 { return 1 | a }
+
+//go:noinline
+func or_int16_32766(a int16) int16 { return a | 32766 }
+
+//go:noinline
+func or_32766_int16(a int16) int16 { return 32766 | a }
+
+//go:noinline
+func or_int16_32767(a int16) int16 { return a | 32767 }
+
+//go:noinline
+func or_32767_int16(a int16) int16 { return 32767 | a }
+
+//go:noinline
+func xor_int16_Neg32768(a int16) int16 { return a ^ -32768 }
+
+//go:noinline
+func xor_Neg32768_int16(a int16) int16 { return -32768 ^ a }
+
+//go:noinline
+func xor_int16_Neg32767(a int16) int16 { return a ^ -32767 }
+
+//go:noinline
+func xor_Neg32767_int16(a int16) int16 { return -32767 ^ a }
+
+//go:noinline
+func xor_int16_Neg1(a int16) int16 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int16(a int16) int16 { return -1 ^ a }
+
+//go:noinline
+func xor_int16_0(a int16) int16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int16(a int16) int16 { return 0 ^ a }
+
+//go:noinline
+func xor_int16_1(a int16) int16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int16(a int16) int16 { return 1 ^ a }
+
+//go:noinline
+func xor_int16_32766(a int16) int16 { return a ^ 32766 }
+
+//go:noinline
+func xor_32766_int16(a int16) int16 { return 32766 ^ a }
+
+//go:noinline
+func xor_int16_32767(a int16) int16 { return a ^ 32767 }
+
+//go:noinline
+func xor_32767_int16(a int16) int16 { return 32767 ^ a }
+
+//go:noinline
+func add_uint8_0(a uint8) uint8 { return a + 0 }
+
+//go:noinline
+func add_0_uint8(a uint8) uint8 { return 0 + a }
+
+//go:noinline
+func add_uint8_1(a uint8) uint8 { return a + 1 }
+
+//go:noinline
+func add_1_uint8(a uint8) uint8 { return 1 + a }
+
+//go:noinline
+func add_uint8_255(a uint8) uint8 { return a + 255 }
+
+//go:noinline
+func add_255_uint8(a uint8) uint8 { return 255 + a }
+
+//go:noinline
+func sub_uint8_0(a uint8) uint8 { return a - 0 }
+
+//go:noinline
+func sub_0_uint8(a uint8) uint8 { return 0 - a }
+
+//go:noinline
+func sub_uint8_1(a uint8) uint8 { return a - 1 }
+
+//go:noinline
+func sub_1_uint8(a uint8) uint8 { return 1 - a }
+
+//go:noinline
+func sub_uint8_255(a uint8) uint8 { return a - 255 }
+
+//go:noinline
+func sub_255_uint8(a uint8) uint8 { return 255 - a }
+
+//go:noinline
+func div_0_uint8(a uint8) uint8 { return 0 / a }
+
+//go:noinline
+func div_uint8_1(a uint8) uint8 { return a / 1 }
+
+//go:noinline
+func div_1_uint8(a uint8) uint8 { return 1 / a }
+
+//go:noinline
+func div_uint8_255(a uint8) uint8 { return a / 255 }
+
+//go:noinline
+func div_255_uint8(a uint8) uint8 { return 255 / a }
+
+//go:noinline
+func mul_uint8_0(a uint8) uint8 { return a * 0 }
+
+//go:noinline
+func mul_0_uint8(a uint8) uint8 { return 0 * a }
+
+//go:noinline
+func mul_uint8_1(a uint8) uint8 { return a * 1 }
+
+//go:noinline
+func mul_1_uint8(a uint8) uint8 { return 1 * a }
+
+//go:noinline
+func mul_uint8_255(a uint8) uint8 { return a * 255 }
+
+//go:noinline
+func mul_255_uint8(a uint8) uint8 { return 255 * a }
+
+//go:noinline
+func lsh_uint8_0(a uint8) uint8 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint8(a uint8) uint8 { return 0 << a }
+
+//go:noinline
+func lsh_uint8_1(a uint8) uint8 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint8(a uint8) uint8 { return 1 << a }
+
+//go:noinline
+func lsh_uint8_255(a uint8) uint8 { return a << 255 }
+
+//go:noinline
+func lsh_255_uint8(a uint8) uint8 { return 255 << a }
+
+//go:noinline
+func rsh_uint8_0(a uint8) uint8 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint8(a uint8) uint8 { return 0 >> a }
+
+//go:noinline
+func rsh_uint8_1(a uint8) uint8 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint8(a uint8) uint8 { return 1 >> a }
+
+//go:noinline
+func rsh_uint8_255(a uint8) uint8 { return a >> 255 }
+
+//go:noinline
+func rsh_255_uint8(a uint8) uint8 { return 255 >> a }
+
+//go:noinline
+func mod_0_uint8(a uint8) uint8 { return 0 % a }
+
+//go:noinline
+func mod_uint8_1(a uint8) uint8 { return a % 1 }
+
+//go:noinline
+func mod_1_uint8(a uint8) uint8 { return 1 % a }
+
+//go:noinline
+func mod_uint8_255(a uint8) uint8 { return a % 255 }
+
+//go:noinline
+func mod_255_uint8(a uint8) uint8 { return 255 % a }
+
+//go:noinline
+func and_uint8_0(a uint8) uint8 { return a & 0 }
+
+//go:noinline
+func and_0_uint8(a uint8) uint8 { return 0 & a }
+
+//go:noinline
+func and_uint8_1(a uint8) uint8 { return a & 1 }
+
+//go:noinline
+func and_1_uint8(a uint8) uint8 { return 1 & a }
+
+//go:noinline
+func and_uint8_255(a uint8) uint8 { return a & 255 }
+
+//go:noinline
+func and_255_uint8(a uint8) uint8 { return 255 & a }
+
+//go:noinline
+func or_uint8_0(a uint8) uint8 { return a | 0 }
+
+//go:noinline
+func or_0_uint8(a uint8) uint8 { return 0 | a }
+
+//go:noinline
+func or_uint8_1(a uint8) uint8 { return a | 1 }
+
+//go:noinline
+func or_1_uint8(a uint8) uint8 { return 1 | a }
+
+//go:noinline
+func or_uint8_255(a uint8) uint8 { return a | 255 }
+
+//go:noinline
+func or_255_uint8(a uint8) uint8 { return 255 | a }
+
+//go:noinline
+func xor_uint8_0(a uint8) uint8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint8(a uint8) uint8 { return 0 ^ a }
+
+//go:noinline
+func xor_uint8_1(a uint8) uint8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint8(a uint8) uint8 { return 1 ^ a }
+
+//go:noinline
+func xor_uint8_255(a uint8) uint8 { return a ^ 255 }
+
+//go:noinline
+func xor_255_uint8(a uint8) uint8 { return 255 ^ a }
+
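+// int8 test wrappers: the same pattern with the signed boundary constants
+// -128, -127, -1, 0, 1, 126, and 127.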
+//go:noinline
+func add_int8_Neg128(a int8) int8 { return a + -128 }
+
+//go:noinline
+func add_Neg128_int8(a int8) int8 { return -128 + a }
+
+//go:noinline
+func add_int8_Neg127(a int8) int8 { return a + -127 }
+
+//go:noinline
+func add_Neg127_int8(a int8) int8 { return -127 + a }
+
+//go:noinline
+func add_int8_Neg1(a int8) int8 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int8(a int8) int8 { return -1 + a }
+
+//go:noinline
+func add_int8_0(a int8) int8 { return a + 0 }
+
+//go:noinline
+func add_0_int8(a int8) int8 { return 0 + a }
+
+//go:noinline
+func add_int8_1(a int8) int8 { return a + 1 }
+
+//go:noinline
+func add_1_int8(a int8) int8 { return 1 + a }
+
+//go:noinline
+func add_int8_126(a int8) int8 { return a + 126 }
+
+//go:noinline
+func add_126_int8(a int8) int8 { return 126 + a }
+
+//go:noinline
+func add_int8_127(a int8) int8 { return a + 127 }
+
+//go:noinline
+func add_127_int8(a int8) int8 { return 127 + a }
+
+//go:noinline
+func sub_int8_Neg128(a int8) int8 { return a - -128 }
+
+//go:noinline
+func sub_Neg128_int8(a int8) int8 { return -128 - a }
+
+//go:noinline
+func sub_int8_Neg127(a int8) int8 { return a - -127 }
+
+//go:noinline
+func sub_Neg127_int8(a int8) int8 { return -127 - a }
+
+//go:noinline
+func sub_int8_Neg1(a int8) int8 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int8(a int8) int8 { return -1 - a }
+
+//go:noinline
+func sub_int8_0(a int8) int8 { return a - 0 }
+
+//go:noinline
+func sub_0_int8(a int8) int8 { return 0 - a }
+
+//go:noinline
+func sub_int8_1(a int8) int8 { return a - 1 }
+
+//go:noinline
+func sub_1_int8(a int8) int8 { return 1 - a }
+
+//go:noinline
+func sub_int8_126(a int8) int8 { return a - 126 }
+
+//go:noinline
+func sub_126_int8(a int8) int8 { return 126 - a }
+
+//go:noinline
+func sub_int8_127(a int8) int8 { return a - 127 }
+
+//go:noinline
+func sub_127_int8(a int8) int8 { return 127 - a }
+
+//go:noinline
+func div_int8_Neg128(a int8) int8 { return a / -128 }
+
+//go:noinline
+func div_Neg128_int8(a int8) int8 { return -128 / a }
+
+//go:noinline
+func div_int8_Neg127(a int8) int8 { return a / -127 }
+
+//go:noinline
+func div_Neg127_int8(a int8) int8 { return -127 / a }
+
+//go:noinline
+func div_int8_Neg1(a int8) int8 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int8(a int8) int8 { return -1 / a }
+
+//go:noinline
+func div_0_int8(a int8) int8 { return 0 / a }
+
+//go:noinline
+func div_int8_1(a int8) int8 { return a / 1 }
+
+//go:noinline
+func div_1_int8(a int8) int8 { return 1 / a }
+
+//go:noinline
+func div_int8_126(a int8) int8 { return a / 126 }
+
+//go:noinline
+func div_126_int8(a int8) int8 { return 126 / a }
+
+//go:noinline
+func div_int8_127(a int8) int8 { return a / 127 }
+
+//go:noinline
+func div_127_int8(a int8) int8 { return 127 / a }
+
+//go:noinline
+func mul_int8_Neg128(a int8) int8 { return a * -128 }
+
+//go:noinline
+func mul_Neg128_int8(a int8) int8 { return -128 * a }
+
+//go:noinline
+func mul_int8_Neg127(a int8) int8 { return a * -127 }
+
+//go:noinline
+func mul_Neg127_int8(a int8) int8 { return -127 * a }
+
+//go:noinline
+func mul_int8_Neg1(a int8) int8 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int8(a int8) int8 { return -1 * a }
+
+//go:noinline
+func mul_int8_0(a int8) int8 { return a * 0 }
+
+//go:noinline
+func mul_0_int8(a int8) int8 { return 0 * a }
+
+//go:noinline
+func mul_int8_1(a int8) int8 { return a * 1 }
+
+//go:noinline
+func mul_1_int8(a int8) int8 { return 1 * a }
+
+//go:noinline
+func mul_int8_126(a int8) int8 { return a * 126 }
+
+//go:noinline
+func mul_126_int8(a int8) int8 { return 126 * a }
+
+//go:noinline
+func mul_int8_127(a int8) int8 { return a * 127 }
+
+//go:noinline
+func mul_127_int8(a int8) int8 { return 127 * a }
+
+//go:noinline
+func mod_int8_Neg128(a int8) int8 { return a % -128 }
+
+//go:noinline
+func mod_Neg128_int8(a int8) int8 { return -128 % a }
+
+//go:noinline
+func mod_int8_Neg127(a int8) int8 { return a % -127 }
+
+//go:noinline
+func mod_Neg127_int8(a int8) int8 { return -127 % a }
+
+//go:noinline
+func mod_int8_Neg1(a int8) int8 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int8(a int8) int8 { return -1 % a }
+
+//go:noinline
+func mod_0_int8(a int8) int8 { return 0 % a }
+
+//go:noinline
+func mod_int8_1(a int8) int8 { return a % 1 }
+
+//go:noinline
+func mod_1_int8(a int8) int8 { return 1 % a }
+
+//go:noinline
+func mod_int8_126(a int8) int8 { return a % 126 }
+
+//go:noinline
+func mod_126_int8(a int8) int8 { return 126 % a }
+
+//go:noinline
+func mod_int8_127(a int8) int8 { return a % 127 }
+
+//go:noinline
+func mod_127_int8(a int8) int8 { return 127 % a }
+
+//go:noinline
+func and_int8_Neg128(a int8) int8 { return a & -128 }
+
+//go:noinline
+func and_Neg128_int8(a int8) int8 { return -128 & a }
+
+//go:noinline
+func and_int8_Neg127(a int8) int8 { return a & -127 }
+
+//go:noinline
+func and_Neg127_int8(a int8) int8 { return -127 & a }
+
+//go:noinline
+func and_int8_Neg1(a int8) int8 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int8(a int8) int8 { return -1 & a }
+
+//go:noinline
+func and_int8_0(a int8) int8 { return a & 0 }
+
+//go:noinline
+func and_0_int8(a int8) int8 { return 0 & a }
+
+//go:noinline
+func and_int8_1(a int8) int8 { return a & 1 }
+
+//go:noinline
+func and_1_int8(a int8) int8 { return 1 & a }
+
+//go:noinline
+func and_int8_126(a int8) int8 { return a & 126 }
+
+//go:noinline
+func and_126_int8(a int8) int8 { return 126 & a }
+
+//go:noinline
+func and_int8_127(a int8) int8 { return a & 127 }
+
+//go:noinline
+func and_127_int8(a int8) int8 { return 127 & a }
+
+//go:noinline
+func or_int8_Neg128(a int8) int8 { return a | -128 }
+
+//go:noinline
+func or_Neg128_int8(a int8) int8 { return -128 | a }
+
+//go:noinline
+func or_int8_Neg127(a int8) int8 { return a | -127 }
+
+//go:noinline
+func or_Neg127_int8(a int8) int8 { return -127 | a }
+
+//go:noinline
+func or_int8_Neg1(a int8) int8 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int8(a int8) int8 { return -1 | a }
+
+//go:noinline
+func or_int8_0(a int8) int8 { return a | 0 }
+
+//go:noinline
+func or_0_int8(a int8) int8 { return 0 | a }
+
+//go:noinline
+func or_int8_1(a int8) int8 { return a | 1 }
+
+//go:noinline
+func or_1_int8(a int8) int8 { return 1 | a }
+
+//go:noinline
+func or_int8_126(a int8) int8 { return a | 126 }
+
+//go:noinline
+func or_126_int8(a int8) int8 { return 126 | a }
+
+//go:noinline
+func or_int8_127(a int8) int8 { return a | 127 }
+
+//go:noinline
+func or_127_int8(a int8) int8 { return 127 | a }
+
+//go:noinline
+func xor_int8_Neg128(a int8) int8 { return a ^ -128 }
+
+//go:noinline
+func xor_Neg128_int8(a int8) int8 { return -128 ^ a }
+
+//go:noinline
+func xor_int8_Neg127(a int8) int8 { return a ^ -127 }
+
+//go:noinline
+func xor_Neg127_int8(a int8) int8 { return -127 ^ a }
+
+//go:noinline
+func xor_int8_Neg1(a int8) int8 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int8(a int8) int8 { return -1 ^ a }
+
+//go:noinline
+func xor_int8_0(a int8) int8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int8(a int8) int8 { return 0 ^ a }
+
+//go:noinline
+func xor_int8_1(a int8) int8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int8(a int8) int8 { return 1 ^ a }
+
+//go:noinline
+func xor_int8_126(a int8) int8 { return a ^ 126 }
+
+//go:noinline
+func xor_126_int8(a int8) int8 { return 126 ^ a }
+
+//go:noinline
+func xor_int8_127(a int8) int8 { return a ^ 127 }
+
+//go:noinline
+func xor_127_int8(a int8) int8 { return 127 ^ a }
+
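+// test_uint64 describes one table entry: calling fn (one of the wrappers
+// above) with in must return want.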
+type test_uint64 struct {
+ fn func(uint64) uint64
+ fnname string
+ in uint64
+ want uint64
+}
+
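+// tests_uint64 pairs each uint64 wrapper with the boundary inputs
+// 0, 1, 1<<32, 1<<63, and 1<<64-1, and the expected results.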
+var tests_uint64 = []test_uint64{
+
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 0, want: 0},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 0, want: 0},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 1, want: 1},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 1, want: 1},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 0, want: 1},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 0, want: 1},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 1, want: 2},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 1, want: 2},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 0, want: 0},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 0, want: 0},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 1, want: 1},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 0, want: 1},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 1, want: 0},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 1, want: 0},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 4294967296, want: 18446744069414584321},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 4294967296, want: 4294967295},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 18446744073709551615, want: 2},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 0, want: 18446744069414584320},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 1, want: 4294967295},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 1, want: 18446744069414584321},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 9223372036854775808, want: 9223372032559808512},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 18446744073709551615, want: 4294967297},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 4294967296, want: 9223372032559808512},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 0, want: 1},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 1, want: 2},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 4294967296, want: 4294967297},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 1, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 0, want: 0},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 1, want: 1},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 1, want: 1},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 4294967296, want: 1},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 9223372036854775808, want: 2147483648},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 4294967296, want: 2147483648},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 0, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 1, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 1, want: 1},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 1, want: 1},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 1, want: 4294967296},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 1, want: 2},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 1, want: 2},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 4294967296, want: 8589934592},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 1, want: 8589934592},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 1, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 4294967296, want: 2147483648},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 9223372036854775808, want: 4611686018427387904},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 1, want: 2147483648},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 1, want: 4611686018427387904},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 1, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 1, want: 1},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 9223372036854775808, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 1, want: 1},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 0, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 1, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 0, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 1, want: 1},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 0, want: 0},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 0, want: 0},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 1, want: 1},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 0, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 0, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 1, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 0, want: 0},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 0, want: 0},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 1, want: 1},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 1, want: 1},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 0, want: 1},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 0, want: 1},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 1, want: 0},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 1, want: 0},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 18446744073709551615, want: 0}}
+
+type test_uint64mul struct {
+ fn     func(uint64) uint64
+ fnname string
+ in     uint64
+ want   uint64
+}
+
+var tests_uint64mul = []test_uint64{
+
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 3, want: 9},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 3, want: 9},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 5, want: 15},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 5, want: 15},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 7, want: 21},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 7, want: 21},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 9, want: 27},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 9, want: 27},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 10, want: 30},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 10, want: 30},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 11, want: 33},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 11, want: 33},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 13, want: 39},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 13, want: 39},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 19, want: 57},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 19, want: 57},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 21, want: 63},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 21, want: 63},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 25, want: 75},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 25, want: 75},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 27, want: 81},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 27, want: 81},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 37, want: 111},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 37, want: 111},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 41, want: 123},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 41, want: 123},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 45, want: 135},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 45, want: 135},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 73, want: 219},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 73, want: 219},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 81, want: 243},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 81, want: 243},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 3, want: 15},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 3, want: 15},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 5, want: 25},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 5, want: 25},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 7, want: 35},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 7, want: 35},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 9, want: 45},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 9, want: 45},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 10, want: 50},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 10, want: 50},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 11, want: 55},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 11, want: 55},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 13, want: 65},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 13, want: 65},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 19, want: 95},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 19, want: 95},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 21, want: 105},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 21, want: 105},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 25, want: 125},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 25, want: 125},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 27, want: 135},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 27, want: 135},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 37, want: 185},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 37, want: 185},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 41, want: 205},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 41, want: 205},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 45, want: 225},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 45, want: 225},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 73, want: 365},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 73, want: 365},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 81, want: 405},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 81, want: 405},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 3, want: 21},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 3, want: 21},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 5, want: 35},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 5, want: 35},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 7, want: 49},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 7, want: 49},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 9, want: 63},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 9, want: 63},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 10, want: 70},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 10, want: 70},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 11, want: 77},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 11, want: 77},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 13, want: 91},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 13, want: 91},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 19, want: 133},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 19, want: 133},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 21, want: 147},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 21, want: 147},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 25, want: 175},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 25, want: 175},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 27, want: 189},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 27, want: 189},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 37, want: 259},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 37, want: 259},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 41, want: 287},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 41, want: 287},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 45, want: 315},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 45, want: 315},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 73, want: 511},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 73, want: 511},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 81, want: 567},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 81, want: 567},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 3, want: 27},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 3, want: 27},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 5, want: 45},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 5, want: 45},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 7, want: 63},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 7, want: 63},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 9, want: 81},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 9, want: 81},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 10, want: 90},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 10, want: 90},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 11, want: 99},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 11, want: 99},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 13, want: 117},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 13, want: 117},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 19, want: 171},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 19, want: 171},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 21, want: 189},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 21, want: 189},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 25, want: 225},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 25, want: 225},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 27, want: 243},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 27, want: 243},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 37, want: 333},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 37, want: 333},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 41, want: 369},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 41, want: 369},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 45, want: 405},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 45, want: 405},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 73, want: 657},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 73, want: 657},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 81, want: 729},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 81, want: 729},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 3, want: 30},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 3, want: 30},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 5, want: 50},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 5, want: 50},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 7, want: 70},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 7, want: 70},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 9, want: 90},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 9, want: 90},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 10, want: 100},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 10, want: 100},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 11, want: 110},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 11, want: 110},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 13, want: 130},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 13, want: 130},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 19, want: 190},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 19, want: 190},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 21, want: 210},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 21, want: 210},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 25, want: 250},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 25, want: 250},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 27, want: 270},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 27, want: 270},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 37, want: 370},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 37, want: 370},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 41, want: 410},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 41, want: 410},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 45, want: 450},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 45, want: 450},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 73, want: 730},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 73, want: 730},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 81, want: 810},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 81, want: 810},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 3, want: 33},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 3, want: 33},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 5, want: 55},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 5, want: 55},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 7, want: 77},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 7, want: 77},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 9, want: 99},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 9, want: 99},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 10, want: 110},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 10, want: 110},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 11, want: 121},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 11, want: 121},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 13, want: 143},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 13, want: 143},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 19, want: 209},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 19, want: 209},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 21, want: 231},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 21, want: 231},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 25, want: 275},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 25, want: 275},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 27, want: 297},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 27, want: 297},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 37, want: 407},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 37, want: 407},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 41, want: 451},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 41, want: 451},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 45, want: 495},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 45, want: 495},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 73, want: 803},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 73, want: 803},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 81, want: 891},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 81, want: 891},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 3, want: 39},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 3, want: 39},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 5, want: 65},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 5, want: 65},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 7, want: 91},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 7, want: 91},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 9, want: 117},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 9, want: 117},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 10, want: 130},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 10, want: 130},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 11, want: 143},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 11, want: 143},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 13, want: 169},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 13, want: 169},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 19, want: 247},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 19, want: 247},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 21, want: 273},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 21, want: 273},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 25, want: 325},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 25, want: 325},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 27, want: 351},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 27, want: 351},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 37, want: 481},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 37, want: 481},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 41, want: 533},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 41, want: 533},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 45, want: 585},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 45, want: 585},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 73, want: 949},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 73, want: 949},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 81, want: 1053},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 81, want: 1053},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 3, want: 57},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 3, want: 57},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 5, want: 95},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 5, want: 95},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 7, want: 133},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 7, want: 133},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 9, want: 171},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 9, want: 171},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 10, want: 190},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 10, want: 190},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 11, want: 209},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 11, want: 209},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 13, want: 247},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 13, want: 247},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 19, want: 361},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 19, want: 361},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 21, want: 399},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 21, want: 399},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 25, want: 475},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 25, want: 475},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 27, want: 513},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 27, want: 513},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 37, want: 703},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 37, want: 703},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 41, want: 779},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 41, want: 779},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 45, want: 855},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 45, want: 855},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 73, want: 1387},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 73, want: 1387},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 81, want: 1539},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 81, want: 1539},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 3, want: 63},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 3, want: 63},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 5, want: 105},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 5, want: 105},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 7, want: 147},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 7, want: 147},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 9, want: 189},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 9, want: 189},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 10, want: 210},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 10, want: 210},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 11, want: 231},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 11, want: 231},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 13, want: 273},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 13, want: 273},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 19, want: 399},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 19, want: 399},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 21, want: 441},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 21, want: 441},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 25, want: 525},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 25, want: 525},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 27, want: 567},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 27, want: 567},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 37, want: 777},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 37, want: 777},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 41, want: 861},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 41, want: 861},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 45, want: 945},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 45, want: 945},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 73, want: 1533},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 73, want: 1533},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 81, want: 1701},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 81, want: 1701},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 3, want: 75},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 3, want: 75},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 5, want: 125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 5, want: 125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 7, want: 175},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 7, want: 175},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 9, want: 225},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 9, want: 225},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 10, want: 250},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 10, want: 250},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 11, want: 275},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 11, want: 275},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 13, want: 325},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 13, want: 325},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 19, want: 475},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 19, want: 475},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 21, want: 525},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 21, want: 525},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 25, want: 625},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 25, want: 625},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 27, want: 675},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 27, want: 675},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 37, want: 925},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 37, want: 925},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 41, want: 1025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 41, want: 1025},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 45, want: 1125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 45, want: 1125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 73, want: 1825},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 73, want: 1825},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 81, want: 2025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 81, want: 2025},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 3, want: 81},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 3, want: 81},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 5, want: 135},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 5, want: 135},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 7, want: 189},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 7, want: 189},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 9, want: 243},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 9, want: 243},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 10, want: 270},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 10, want: 270},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 11, want: 297},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 11, want: 297},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 13, want: 351},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 13, want: 351},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 19, want: 513},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 19, want: 513},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 21, want: 567},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 21, want: 567},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 25, want: 675},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 25, want: 675},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 27, want: 729},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 27, want: 729},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 37, want: 999},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 37, want: 999},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 41, want: 1107},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 41, want: 1107},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 45, want: 1215},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 45, want: 1215},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 73, want: 1971},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 73, want: 1971},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 81, want: 2187},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 81, want: 2187},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 3, want: 111},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 3, want: 111},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 5, want: 185},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 5, want: 185},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 7, want: 259},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 7, want: 259},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 9, want: 333},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 9, want: 333},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 10, want: 370},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 10, want: 370},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 11, want: 407},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 11, want: 407},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 13, want: 481},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 13, want: 481},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 19, want: 703},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 19, want: 703},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 21, want: 777},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 21, want: 777},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 25, want: 925},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 25, want: 925},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 27, want: 999},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 27, want: 999},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 37, want: 1369},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 37, want: 1369},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 41, want: 1517},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 41, want: 1517},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 45, want: 1665},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 45, want: 1665},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 73, want: 2701},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 73, want: 2701},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 81, want: 2997},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 81, want: 2997},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 3, want: 123},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 3, want: 123},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 5, want: 205},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 5, want: 205},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 7, want: 287},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 7, want: 287},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 9, want: 369},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 9, want: 369},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 10, want: 410},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 10, want: 410},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 11, want: 451},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 11, want: 451},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 13, want: 533},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 13, want: 533},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 19, want: 779},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 19, want: 779},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 21, want: 861},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 21, want: 861},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 25, want: 1025},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 25, want: 1025},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 27, want: 1107},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 27, want: 1107},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 37, want: 1517},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 37, want: 1517},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 41, want: 1681},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 41, want: 1681},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 45, want: 1845},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 45, want: 1845},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 73, want: 2993},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 73, want: 2993},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 81, want: 3321},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 81, want: 3321},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 3, want: 135},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 3, want: 135},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 5, want: 225},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 5, want: 225},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 7, want: 315},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 7, want: 315},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 9, want: 405},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 9, want: 405},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 10, want: 450},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 10, want: 450},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 11, want: 495},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 11, want: 495},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 13, want: 585},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 13, want: 585},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 19, want: 855},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 19, want: 855},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 21, want: 945},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 21, want: 945},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 25, want: 1125},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 25, want: 1125},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 27, want: 1215},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 27, want: 1215},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 37, want: 1665},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 37, want: 1665},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 41, want: 1845},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 41, want: 1845},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 45, want: 2025},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 45, want: 2025},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 73, want: 3285},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 73, want: 3285},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 81, want: 3645},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 81, want: 3645},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 3, want: 219},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 3, want: 219},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 5, want: 365},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 5, want: 365},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 7, want: 511},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 7, want: 511},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 9, want: 657},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 9, want: 657},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 10, want: 730},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 10, want: 730},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 11, want: 803},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 11, want: 803},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 13, want: 949},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 13, want: 949},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 19, want: 1387},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 19, want: 1387},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 21, want: 1533},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 21, want: 1533},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 25, want: 1825},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 25, want: 1825},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 27, want: 1971},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 27, want: 1971},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 37, want: 2701},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 37, want: 2701},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 41, want: 2993},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 41, want: 2993},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 45, want: 3285},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 45, want: 3285},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 73, want: 5329},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 73, want: 5329},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 81, want: 5913},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 81, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 3, want: 243},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 3, want: 243},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 5, want: 405},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 5, want: 405},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 7, want: 567},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 7, want: 567},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 9, want: 729},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 9, want: 729},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 10, want: 810},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 10, want: 810},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 11, want: 891},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 11, want: 891},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 13, want: 1053},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 13, want: 1053},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 19, want: 1539},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 19, want: 1539},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 21, want: 1701},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 21, want: 1701},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 25, want: 2025},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 25, want: 2025},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 27, want: 2187},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 27, want: 2187},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 37, want: 2997},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 37, want: 2997},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 41, want: 3321},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 41, want: 3321},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 45, want: 3645},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 45, want: 3645},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 73, want: 5913},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 73, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 81, want: 6561},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 81, want: 6561}}
+
+type test_int64 struct {
+ fn     func(int64) int64
+ fnname string
+ in     int64
+ want   int64
+}
+
+var tests_int64 = []test_int64{
+
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -1, want: -4294967297},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -1, want: -2},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -1, want: -2},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -1, want: -1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -1, want: -1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 0, want: 0},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 0, want: 0},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 1, want: 1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 1, want: 1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -1, want: 0},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -1, want: 0},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 0, want: 1},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 0, want: 1},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 1, want: 2},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 1, want: 2},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -1, want: 4294967295},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775806, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775806, want: 3},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775807, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -1, want: -4294967295},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 0, want: 4294967296},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 1, want: -4294967297},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 1, want: 4294967297},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808514},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -4294967296, want: -4294967295},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 0, want: 1},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 1, want: 2},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 4294967296, want: 4294967297},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -1, want: 1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -1, want: -1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 0, want: 0},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 0, want: 0},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 1, want: -1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 1, want: 1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -4294967296, want: 4294967297},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -4294967296, want: -4294967297},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -1, want: 2},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -1, want: -2},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 0, want: 1},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 0, want: -1},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 1, want: 0},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 1, want: 0},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 4294967296, want: 4294967295},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775806, want: -9223372036854775805},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -1, want: 4294967297},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 0, want: -4294967296},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 1, want: 4294967295},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 1, want: -4294967295},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775806, want: -9223372032559808510},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775808, want: 2},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775807, want: -3},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775807, want: 3},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -4294967296, want: 9223372032559808514},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 0, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 1, want: 9223372036854775805},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 1, want: -9223372036854775805},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 4294967296, want: -9223372032559808510},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -4294967296, want: 2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 4294967296, want: -2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -4294967296, want: 2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 4294967296, want: -2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775808, want: 2147483648},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775807, want: 2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -4294967296, want: 1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -4294967296, want: 1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 4294967296, want: -1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 4294967296, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775806, want: -2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775807, want: -2147483647},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 0, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 1, want: 1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 1, want: 1},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775808, want: -2147483648},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775807, want: -2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -4294967296, want: -1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -4294967296, want: -1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 4294967296, want: 1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 4294967296, want: 1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775806, want: 2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775807, want: 2147483647},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -1, want: 4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 1, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 0, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -1, want: -1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -1, want: -1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 0, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 1, want: 1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 1, want: 1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -1, want: -4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 1, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 1, want: 1},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775806, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 0, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 1, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775808, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 1, want: 1},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775808, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -1, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 1, want: 1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 0, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 0, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -1, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -1, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 0, want: 0},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 0, want: 0},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 1, want: 1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -1, want: -1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 0, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 0, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 1, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -4294967296, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -4294967296, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -4294967296, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 1, want: -2},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -1, want: -1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -1, want: -1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 0, want: 0},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 0, want: 0},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 1, want: 1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 1, want: 1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -1, want: -2},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -1, want: -2},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 0, want: 1},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 0, want: 1},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 1, want: 0},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 1, want: 0},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775807, want: 0}}
+
+type test_int64mul struct {
+	fn     func(int64) int64
+	fnname string
+	in     int64
+	want   int64
+}
+
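A minimal sketch of how a table like the one that follows is typically consumed, assuming a testing.T loop over the entries; the runner's actual name is not visible in this hunk, and mul_int64_9 plus the two sample rows below are illustrative stand-ins for the generated functions and data:

    package gc_test

    import "testing"

    // test_int64 mirrors the generated element type: a unary wrapper around
    // "constant OP x" (or "x OP constant"), its printable name, one input,
    // and the independently precomputed result.
    type test_int64 struct {
    	fn     func(int64) int64
    	fnname string
    	in     int64
    	want   int64
    }

    // mul_int64_9 stands in for one generated function: the literal 9 is a
    // compile-time constant, so the multiply is a candidate for constant
    // folding / strength reduction in the compiler under test.
    func mul_int64_9(x int64) int64 { return x * 9 }

    var tests_int64mul = []test_int64{
    	{fn: mul_int64_9, fnname: "mul_int64_9", in: 7, want: 63},
    	{fn: mul_int64_9, fnname: "mul_int64_9", in: -5, want: -45},
    }

    // TestConstFoldSketch (hypothetical name) runs every entry and reports
    // any mismatch between the compiled function and the expected value.
    func TestConstFoldSketch(t *testing.T) {
    	for _, tt := range tests_int64mul {
    		if got := tt.fn(tt.in); got != tt.want {
    			t.Errorf("%s(%d) = %d, want %d", tt.fnname, tt.in, got, tt.want)
    		}
    	}
    }
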
+var tests_int64mul = []test_int64{
+
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -9, want: 81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -9, want: 81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -5, want: 45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -5, want: 45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -3, want: 27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -3, want: 27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 3, want: -27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 3, want: -27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 5, want: -45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 5, want: -45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 7, want: -63},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 7, want: -63},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 9, want: -81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 9, want: -81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 10, want: -90},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 10, want: -90},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 11, want: -99},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 11, want: -99},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 13, want: -117},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 13, want: -117},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 19, want: -171},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 19, want: -171},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 21, want: -189},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 21, want: -189},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 25, want: -225},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 25, want: -225},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 27, want: -243},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 27, want: -243},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 37, want: -333},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 37, want: -333},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 41, want: -369},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 41, want: -369},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 45, want: -405},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 45, want: -405},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 73, want: -657},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 73, want: -657},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 81, want: -729},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 81, want: -729},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -9, want: 45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -9, want: 45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -5, want: 25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -5, want: 25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -3, want: 15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -3, want: 15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 3, want: -15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 3, want: -15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 5, want: -25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 5, want: -25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 7, want: -35},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 7, want: -35},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 9, want: -45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 9, want: -45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 10, want: -50},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 10, want: -50},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 11, want: -55},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 11, want: -55},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 13, want: -65},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 13, want: -65},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 19, want: -95},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 19, want: -95},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 21, want: -105},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 21, want: -105},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 25, want: -125},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 25, want: -125},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 27, want: -135},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 27, want: -135},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 37, want: -185},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 37, want: -185},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 41, want: -205},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 41, want: -205},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 45, want: -225},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 45, want: -225},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 73, want: -365},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 73, want: -365},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 81, want: -405},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 81, want: -405},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -9, want: 27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -9, want: 27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -5, want: 15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -5, want: 15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -3, want: 9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -3, want: 9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 3, want: -9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 3, want: -9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 5, want: -15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 5, want: -15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 7, want: -21},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 7, want: -21},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 9, want: -27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 9, want: -27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 10, want: -30},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 10, want: -30},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 11, want: -33},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 11, want: -33},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 13, want: -39},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 13, want: -39},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 19, want: -57},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 19, want: -57},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 21, want: -63},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 21, want: -63},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 25, want: -75},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 25, want: -75},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 27, want: -81},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 27, want: -81},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 37, want: -111},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 37, want: -111},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 41, want: -123},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 41, want: -123},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 45, want: -135},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 45, want: -135},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 73, want: -219},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 73, want: -219},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 81, want: -243},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 81, want: -243},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -9, want: -27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -9, want: -27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -5, want: -15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -5, want: -15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -3, want: -9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -3, want: -9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 3, want: 9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 3, want: 9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 5, want: 15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 5, want: 15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 7, want: 21},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 7, want: 21},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 9, want: 27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 9, want: 27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 10, want: 30},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 10, want: 30},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 11, want: 33},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 11, want: 33},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 13, want: 39},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 13, want: 39},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 19, want: 57},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 19, want: 57},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 21, want: 63},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 21, want: 63},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 25, want: 75},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 25, want: 75},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 27, want: 81},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 27, want: 81},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 37, want: 111},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 37, want: 111},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 41, want: 123},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 41, want: 123},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 45, want: 135},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 45, want: 135},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 73, want: 219},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 73, want: 219},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 81, want: 243},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 81, want: 243},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -9, want: -45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -9, want: -45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -5, want: -25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -5, want: -25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -3, want: -15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -3, want: -15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 3, want: 15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 3, want: 15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 5, want: 25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 5, want: 25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 7, want: 35},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 7, want: 35},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 9, want: 45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 9, want: 45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 10, want: 50},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 10, want: 50},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 11, want: 55},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 11, want: 55},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 13, want: 65},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 13, want: 65},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 19, want: 95},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 19, want: 95},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 21, want: 105},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 21, want: 105},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 25, want: 125},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 25, want: 125},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 27, want: 135},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 27, want: 135},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 37, want: 185},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 37, want: 185},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 41, want: 205},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 41, want: 205},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 45, want: 225},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 45, want: 225},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 73, want: 365},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 73, want: 365},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 81, want: 405},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 81, want: 405},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -9, want: -63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -9, want: -63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -5, want: -35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -5, want: -35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -3, want: -21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -3, want: -21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 3, want: 21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 3, want: 21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 5, want: 35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 5, want: 35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 7, want: 49},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 7, want: 49},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 9, want: 63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 9, want: 63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 10, want: 70},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 10, want: 70},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 11, want: 77},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 11, want: 77},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 13, want: 91},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 13, want: 91},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 19, want: 133},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 19, want: 133},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 21, want: 147},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 21, want: 147},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 25, want: 175},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 25, want: 175},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 27, want: 189},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 27, want: 189},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 37, want: 259},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 37, want: 259},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 41, want: 287},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 41, want: 287},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 45, want: 315},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 45, want: 315},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 73, want: 511},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 73, want: 511},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 81, want: 567},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 81, want: 567},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -9, want: -81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -9, want: -81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -5, want: -45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -5, want: -45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -3, want: -27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -3, want: -27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 3, want: 27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 3, want: 27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 5, want: 45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 5, want: 45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 7, want: 63},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 7, want: 63},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 9, want: 81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 9, want: 81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 10, want: 90},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 10, want: 90},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 11, want: 99},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 11, want: 99},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 13, want: 117},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 13, want: 117},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 19, want: 171},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 19, want: 171},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 21, want: 189},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 21, want: 189},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 25, want: 225},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 25, want: 225},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 27, want: 243},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 27, want: 243},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 37, want: 333},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 37, want: 333},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 41, want: 369},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 41, want: 369},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 45, want: 405},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 45, want: 405},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 73, want: 657},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 73, want: 657},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 81, want: 729},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 81, want: 729},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -9, want: -90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -9, want: -90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -5, want: -50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -5, want: -50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -3, want: -30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -3, want: -30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 3, want: 30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 3, want: 30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 5, want: 50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 5, want: 50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 7, want: 70},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 7, want: 70},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 9, want: 90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 9, want: 90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 10, want: 100},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 10, want: 100},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 11, want: 110},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 11, want: 110},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 13, want: 130},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 13, want: 130},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 19, want: 190},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 19, want: 190},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 21, want: 210},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 21, want: 210},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 25, want: 250},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 25, want: 250},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 27, want: 270},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 27, want: 270},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 37, want: 370},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 37, want: 370},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 41, want: 410},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 41, want: 410},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 45, want: 450},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 45, want: 450},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 73, want: 730},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 73, want: 730},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 81, want: 810},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 81, want: 810},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -9, want: -99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -9, want: -99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -5, want: -55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -5, want: -55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -3, want: -33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -3, want: -33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 3, want: 33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 3, want: 33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 5, want: 55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 5, want: 55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 7, want: 77},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 7, want: 77},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 9, want: 99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 9, want: 99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 10, want: 110},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 10, want: 110},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 11, want: 121},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 11, want: 121},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 13, want: 143},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 13, want: 143},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 19, want: 209},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 19, want: 209},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 21, want: 231},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 21, want: 231},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 25, want: 275},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 25, want: 275},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 27, want: 297},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 27, want: 297},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 37, want: 407},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 37, want: 407},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 41, want: 451},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 41, want: 451},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 45, want: 495},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 45, want: 495},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 73, want: 803},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 73, want: 803},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 81, want: 891},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 81, want: 891},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -9, want: -117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -9, want: -117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -5, want: -65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -5, want: -65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -3, want: -39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -3, want: -39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 3, want: 39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 3, want: 39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 5, want: 65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 5, want: 65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 7, want: 91},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 7, want: 91},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 9, want: 117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 9, want: 117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 10, want: 130},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 10, want: 130},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 11, want: 143},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 11, want: 143},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 13, want: 169},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 13, want: 169},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 19, want: 247},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 19, want: 247},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 21, want: 273},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 21, want: 273},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 25, want: 325},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 25, want: 325},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 27, want: 351},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 27, want: 351},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 37, want: 481},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 37, want: 481},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 41, want: 533},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 41, want: 533},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 45, want: 585},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 45, want: 585},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 73, want: 949},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 73, want: 949},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 81, want: 1053},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 81, want: 1053},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -9, want: -171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -9, want: -171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -5, want: -95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -5, want: -95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -3, want: -57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -3, want: -57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 3, want: 57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 3, want: 57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 5, want: 95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 5, want: 95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 7, want: 133},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 7, want: 133},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 9, want: 171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 9, want: 171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 10, want: 190},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 10, want: 190},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 11, want: 209},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 11, want: 209},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 13, want: 247},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 13, want: 247},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 19, want: 361},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 19, want: 361},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 21, want: 399},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 21, want: 399},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 25, want: 475},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 25, want: 475},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 27, want: 513},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 27, want: 513},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 37, want: 703},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 37, want: 703},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 41, want: 779},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 41, want: 779},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 45, want: 855},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 45, want: 855},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 73, want: 1387},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 73, want: 1387},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 81, want: 1539},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 81, want: 1539},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -9, want: -189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -9, want: -189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -5, want: -105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -5, want: -105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -3, want: -63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -3, want: -63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 3, want: 63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 3, want: 63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 5, want: 105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 5, want: 105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 7, want: 147},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 7, want: 147},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 9, want: 189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 9, want: 189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 10, want: 210},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 10, want: 210},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 11, want: 231},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 11, want: 231},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 13, want: 273},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 13, want: 273},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 19, want: 399},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 19, want: 399},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 21, want: 441},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 21, want: 441},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 25, want: 525},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 25, want: 525},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 27, want: 567},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 27, want: 567},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 37, want: 777},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 37, want: 777},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 41, want: 861},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 41, want: 861},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 45, want: 945},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 45, want: 945},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 73, want: 1533},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 73, want: 1533},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 81, want: 1701},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 81, want: 1701},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -9, want: -225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -9, want: -225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -5, want: -125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -5, want: -125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -3, want: -75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -3, want: -75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 3, want: 75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 3, want: 75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 5, want: 125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 5, want: 125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 7, want: 175},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 7, want: 175},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 9, want: 225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 9, want: 225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 10, want: 250},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 10, want: 250},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 11, want: 275},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 11, want: 275},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 13, want: 325},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 13, want: 325},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 19, want: 475},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 19, want: 475},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 21, want: 525},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 21, want: 525},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 25, want: 625},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 25, want: 625},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 27, want: 675},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 27, want: 675},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 37, want: 925},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 37, want: 925},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 41, want: 1025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 41, want: 1025},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 45, want: 1125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 45, want: 1125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 73, want: 1825},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 73, want: 1825},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 81, want: 2025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 81, want: 2025},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -9, want: -243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -9, want: -243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -5, want: -135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -5, want: -135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -3, want: -81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -3, want: -81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 3, want: 81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 3, want: 81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 5, want: 135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 5, want: 135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 7, want: 189},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 7, want: 189},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 9, want: 243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 9, want: 243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 10, want: 270},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 10, want: 270},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 11, want: 297},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 11, want: 297},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 13, want: 351},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 13, want: 351},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 19, want: 513},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 19, want: 513},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 21, want: 567},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 21, want: 567},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 25, want: 675},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 25, want: 675},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 27, want: 729},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 27, want: 729},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 37, want: 999},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 37, want: 999},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 41, want: 1107},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 41, want: 1107},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 45, want: 1215},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 45, want: 1215},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 73, want: 1971},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 73, want: 1971},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 81, want: 2187},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 81, want: 2187},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -9, want: -333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -9, want: -333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -5, want: -185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -5, want: -185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -3, want: -111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -3, want: -111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 3, want: 111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 3, want: 111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 5, want: 185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 5, want: 185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 7, want: 259},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 7, want: 259},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 9, want: 333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 9, want: 333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 10, want: 370},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 10, want: 370},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 11, want: 407},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 11, want: 407},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 13, want: 481},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 13, want: 481},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 19, want: 703},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 19, want: 703},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 21, want: 777},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 21, want: 777},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 25, want: 925},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 25, want: 925},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 27, want: 999},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 27, want: 999},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 37, want: 1369},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 37, want: 1369},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 41, want: 1517},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 41, want: 1517},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 45, want: 1665},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 45, want: 1665},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 73, want: 2701},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 73, want: 2701},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 81, want: 2997},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 81, want: 2997},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -9, want: -369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -9, want: -369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -5, want: -205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -5, want: -205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -3, want: -123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -3, want: -123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 3, want: 123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 3, want: 123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 5, want: 205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 5, want: 205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 7, want: 287},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 7, want: 287},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 9, want: 369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 9, want: 369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 10, want: 410},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 10, want: 410},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 11, want: 451},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 11, want: 451},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 13, want: 533},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 13, want: 533},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 19, want: 779},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 19, want: 779},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 21, want: 861},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 21, want: 861},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 25, want: 1025},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 25, want: 1025},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 27, want: 1107},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 27, want: 1107},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 37, want: 1517},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 37, want: 1517},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 41, want: 1681},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 41, want: 1681},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 45, want: 1845},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 45, want: 1845},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 73, want: 2993},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 73, want: 2993},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 81, want: 3321},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 81, want: 3321},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -9, want: -405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -9, want: -405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -5, want: -225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -5, want: -225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -3, want: -135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -3, want: -135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 3, want: 135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 3, want: 135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 5, want: 225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 5, want: 225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 7, want: 315},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 7, want: 315},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 9, want: 405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 9, want: 405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 10, want: 450},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 10, want: 450},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 11, want: 495},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 11, want: 495},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 13, want: 585},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 13, want: 585},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 19, want: 855},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 19, want: 855},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 21, want: 945},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 21, want: 945},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 25, want: 1125},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 25, want: 1125},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 27, want: 1215},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 27, want: 1215},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 37, want: 1665},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 37, want: 1665},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 41, want: 1845},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 41, want: 1845},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 45, want: 2025},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 45, want: 2025},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 73, want: 3285},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 73, want: 3285},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 81, want: 3645},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 81, want: 3645},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -9, want: -657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -9, want: -657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -5, want: -365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -5, want: -365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -3, want: -219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -3, want: -219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 3, want: 219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 3, want: 219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 5, want: 365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 5, want: 365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 7, want: 511},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 7, want: 511},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 9, want: 657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 9, want: 657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 10, want: 730},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 10, want: 730},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 11, want: 803},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 11, want: 803},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 13, want: 949},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 13, want: 949},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 19, want: 1387},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 19, want: 1387},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 21, want: 1533},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 21, want: 1533},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 25, want: 1825},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 25, want: 1825},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 27, want: 1971},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 27, want: 1971},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 37, want: 2701},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 37, want: 2701},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 41, want: 2993},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 41, want: 2993},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 45, want: 3285},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 45, want: 3285},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 73, want: 5329},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 73, want: 5329},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 81, want: 5913},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 81, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -9, want: -729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -9, want: -729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -5, want: -405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -5, want: -405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -3, want: -243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -3, want: -243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 3, want: 243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 3, want: 243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 5, want: 405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 5, want: 405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 7, want: 567},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 7, want: 567},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 9, want: 729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 9, want: 729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 10, want: 810},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 10, want: 810},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 11, want: 891},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 11, want: 891},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 13, want: 1053},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 13, want: 1053},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 19, want: 1539},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 19, want: 1539},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 21, want: 1701},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 21, want: 1701},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 25, want: 2025},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 25, want: 2025},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 27, want: 2187},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 27, want: 2187},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 37, want: 2997},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 37, want: 2997},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 41, want: 3321},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 41, want: 3321},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 45, want: 3645},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 45, want: 3645},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 73, want: 5913},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 73, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 81, want: 6561},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 81, want: 6561}}
+
+type test_uint32 struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32 = []test_uint32{
+
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 0, want: 0},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 0, want: 0},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 1, want: 1},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 1, want: 1},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 0, want: 1},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 0, want: 1},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 1, want: 2},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 1, want: 2},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 0, want: 0},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 0, want: 0},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 1, want: 1},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 0, want: 1},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 0, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 1, want: 0},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 1, want: 0},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 4294967295, want: 2},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 0, want: 1},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 1, want: 2},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 1, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 0, want: 0},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 1, want: 1},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 1, want: 1},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 0, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 1, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 1, want: 1},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 1, want: 1},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 1, want: 2},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 1, want: 2},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 1, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 4294967295, want: 2147483647},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 1, want: 2147483647},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 1, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 0, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 1, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 1, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 0, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 1, want: 1},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 4294967295, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 0, want: 0},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 0, want: 0},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 1, want: 1},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 0, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 0, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 1, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 0, want: 0},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 0, want: 0},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 1, want: 1},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 1, want: 1},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 0, want: 1},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 0, want: 1},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 1, want: 0},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 1, want: 0},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 1, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 4294967295, want: 0}}
+
+type test_uint32mul struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32mul = []test_uint32{
+
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 3, want: 9},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 3, want: 9},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 5, want: 15},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 5, want: 15},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 7, want: 21},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 7, want: 21},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 9, want: 27},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 9, want: 27},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 10, want: 30},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 10, want: 30},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 11, want: 33},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 11, want: 33},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 13, want: 39},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 13, want: 39},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 19, want: 57},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 19, want: 57},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 21, want: 63},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 21, want: 63},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 25, want: 75},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 25, want: 75},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 27, want: 81},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 27, want: 81},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 37, want: 111},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 37, want: 111},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 41, want: 123},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 41, want: 123},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 45, want: 135},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 45, want: 135},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 73, want: 219},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 73, want: 219},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 81, want: 243},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 81, want: 243},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 3, want: 15},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 3, want: 15},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 5, want: 25},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 5, want: 25},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 7, want: 35},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 7, want: 35},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 9, want: 45},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 9, want: 45},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 10, want: 50},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 10, want: 50},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 11, want: 55},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 11, want: 55},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 13, want: 65},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 13, want: 65},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 19, want: 95},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 19, want: 95},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 21, want: 105},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 21, want: 105},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 25, want: 125},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 25, want: 125},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 27, want: 135},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 27, want: 135},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 37, want: 185},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 37, want: 185},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 41, want: 205},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 41, want: 205},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 45, want: 225},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 45, want: 225},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 73, want: 365},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 73, want: 365},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 81, want: 405},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 81, want: 405},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 3, want: 21},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 3, want: 21},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 5, want: 35},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 5, want: 35},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 7, want: 49},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 7, want: 49},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 9, want: 63},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 9, want: 63},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 10, want: 70},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 10, want: 70},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 11, want: 77},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 11, want: 77},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 13, want: 91},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 13, want: 91},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 19, want: 133},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 19, want: 133},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 21, want: 147},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 21, want: 147},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 25, want: 175},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 25, want: 175},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 27, want: 189},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 27, want: 189},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 37, want: 259},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 37, want: 259},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 41, want: 287},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 41, want: 287},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 45, want: 315},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 45, want: 315},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 73, want: 511},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 73, want: 511},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 81, want: 567},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 81, want: 567},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 3, want: 27},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 3, want: 27},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 5, want: 45},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 5, want: 45},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 7, want: 63},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 7, want: 63},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 9, want: 81},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 9, want: 81},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 10, want: 90},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 10, want: 90},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 11, want: 99},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 11, want: 99},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 13, want: 117},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 13, want: 117},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 19, want: 171},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 19, want: 171},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 21, want: 189},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 21, want: 189},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 25, want: 225},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 25, want: 225},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 27, want: 243},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 27, want: 243},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 37, want: 333},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 37, want: 333},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 41, want: 369},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 41, want: 369},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 45, want: 405},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 45, want: 405},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 73, want: 657},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 73, want: 657},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 81, want: 729},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 81, want: 729},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 3, want: 30},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 3, want: 30},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 5, want: 50},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 5, want: 50},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 7, want: 70},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 7, want: 70},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 9, want: 90},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 9, want: 90},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 10, want: 100},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 10, want: 100},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 11, want: 110},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 11, want: 110},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 13, want: 130},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 13, want: 130},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 19, want: 190},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 19, want: 190},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 21, want: 210},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 21, want: 210},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 25, want: 250},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 25, want: 250},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 27, want: 270},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 27, want: 270},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 37, want: 370},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 37, want: 370},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 41, want: 410},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 41, want: 410},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 45, want: 450},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 45, want: 450},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 73, want: 730},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 73, want: 730},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 81, want: 810},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 81, want: 810},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 3, want: 33},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 3, want: 33},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 5, want: 55},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 5, want: 55},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 7, want: 77},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 7, want: 77},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 9, want: 99},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 9, want: 99},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 10, want: 110},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 10, want: 110},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 11, want: 121},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 11, want: 121},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 13, want: 143},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 13, want: 143},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 19, want: 209},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 19, want: 209},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 21, want: 231},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 21, want: 231},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 25, want: 275},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 25, want: 275},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 27, want: 297},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 27, want: 297},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 37, want: 407},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 37, want: 407},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 41, want: 451},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 41, want: 451},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 45, want: 495},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 45, want: 495},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 73, want: 803},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 73, want: 803},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 81, want: 891},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 81, want: 891},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 3, want: 39},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 3, want: 39},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 5, want: 65},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 5, want: 65},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 7, want: 91},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 7, want: 91},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 9, want: 117},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 9, want: 117},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 10, want: 130},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 10, want: 130},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 11, want: 143},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 11, want: 143},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 13, want: 169},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 13, want: 169},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 19, want: 247},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 19, want: 247},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 21, want: 273},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 21, want: 273},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 25, want: 325},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 25, want: 325},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 27, want: 351},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 27, want: 351},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 37, want: 481},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 37, want: 481},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 41, want: 533},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 41, want: 533},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 45, want: 585},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 45, want: 585},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 73, want: 949},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 73, want: 949},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 81, want: 1053},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 81, want: 1053},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 3, want: 57},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 3, want: 57},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 5, want: 95},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 5, want: 95},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 7, want: 133},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 7, want: 133},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 9, want: 171},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 9, want: 171},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 10, want: 190},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 10, want: 190},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 11, want: 209},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 11, want: 209},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 13, want: 247},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 13, want: 247},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 19, want: 361},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 19, want: 361},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 21, want: 399},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 21, want: 399},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 25, want: 475},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 25, want: 475},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 27, want: 513},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 27, want: 513},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 37, want: 703},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 37, want: 703},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 41, want: 779},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 41, want: 779},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 45, want: 855},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 45, want: 855},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 73, want: 1387},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 73, want: 1387},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 81, want: 1539},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 81, want: 1539},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 3, want: 63},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 3, want: 63},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 5, want: 105},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 5, want: 105},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 7, want: 147},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 7, want: 147},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 9, want: 189},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 9, want: 189},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 10, want: 210},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 10, want: 210},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 11, want: 231},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 11, want: 231},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 13, want: 273},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 13, want: 273},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 19, want: 399},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 19, want: 399},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 21, want: 441},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 21, want: 441},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 25, want: 525},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 25, want: 525},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 27, want: 567},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 27, want: 567},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 37, want: 777},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 37, want: 777},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 41, want: 861},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 41, want: 861},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 45, want: 945},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 45, want: 945},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 73, want: 1533},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 73, want: 1533},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 81, want: 1701},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 81, want: 1701},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 3, want: 75},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 3, want: 75},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 5, want: 125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 5, want: 125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 7, want: 175},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 7, want: 175},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 9, want: 225},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 9, want: 225},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 10, want: 250},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 10, want: 250},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 11, want: 275},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 11, want: 275},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 13, want: 325},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 13, want: 325},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 19, want: 475},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 19, want: 475},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 21, want: 525},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 21, want: 525},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 25, want: 625},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 25, want: 625},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 27, want: 675},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 27, want: 675},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 37, want: 925},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 37, want: 925},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 41, want: 1025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 41, want: 1025},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 45, want: 1125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 45, want: 1125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 73, want: 1825},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 73, want: 1825},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 81, want: 2025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 81, want: 2025},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 3, want: 81},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 3, want: 81},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 5, want: 135},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 5, want: 135},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 7, want: 189},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 7, want: 189},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 9, want: 243},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 9, want: 243},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 10, want: 270},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 10, want: 270},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 11, want: 297},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 11, want: 297},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 13, want: 351},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 13, want: 351},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 19, want: 513},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 19, want: 513},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 21, want: 567},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 21, want: 567},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 25, want: 675},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 25, want: 675},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 27, want: 729},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 27, want: 729},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 37, want: 999},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 37, want: 999},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 41, want: 1107},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 41, want: 1107},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 45, want: 1215},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 45, want: 1215},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 73, want: 1971},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 73, want: 1971},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 81, want: 2187},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 81, want: 2187},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 3, want: 111},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 3, want: 111},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 5, want: 185},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 5, want: 185},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 7, want: 259},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 7, want: 259},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 9, want: 333},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 9, want: 333},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 10, want: 370},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 10, want: 370},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 11, want: 407},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 11, want: 407},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 13, want: 481},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 13, want: 481},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 19, want: 703},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 19, want: 703},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 21, want: 777},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 21, want: 777},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 25, want: 925},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 25, want: 925},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 27, want: 999},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 27, want: 999},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 37, want: 1369},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 37, want: 1369},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 41, want: 1517},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 41, want: 1517},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 45, want: 1665},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 45, want: 1665},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 73, want: 2701},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 73, want: 2701},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 81, want: 2997},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 81, want: 2997},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 3, want: 123},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 3, want: 123},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 5, want: 205},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 5, want: 205},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 7, want: 287},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 7, want: 287},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 9, want: 369},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 9, want: 369},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 10, want: 410},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 10, want: 410},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 11, want: 451},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 11, want: 451},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 13, want: 533},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 13, want: 533},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 19, want: 779},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 19, want: 779},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 21, want: 861},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 21, want: 861},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 25, want: 1025},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 25, want: 1025},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 27, want: 1107},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 27, want: 1107},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 37, want: 1517},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 37, want: 1517},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 41, want: 1681},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 41, want: 1681},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 45, want: 1845},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 45, want: 1845},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 73, want: 2993},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 73, want: 2993},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 81, want: 3321},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 81, want: 3321},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 3, want: 135},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 3, want: 135},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 5, want: 225},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 5, want: 225},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 7, want: 315},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 7, want: 315},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 9, want: 405},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 9, want: 405},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 10, want: 450},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 10, want: 450},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 11, want: 495},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 11, want: 495},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 13, want: 585},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 13, want: 585},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 19, want: 855},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 19, want: 855},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 21, want: 945},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 21, want: 945},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 25, want: 1125},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 25, want: 1125},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 27, want: 1215},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 27, want: 1215},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 37, want: 1665},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 37, want: 1665},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 41, want: 1845},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 41, want: 1845},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 45, want: 2025},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 45, want: 2025},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 73, want: 3285},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 73, want: 3285},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 81, want: 3645},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 81, want: 3645},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 3, want: 219},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 3, want: 219},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 5, want: 365},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 5, want: 365},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 7, want: 511},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 7, want: 511},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 9, want: 657},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 9, want: 657},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 10, want: 730},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 10, want: 730},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 11, want: 803},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 11, want: 803},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 13, want: 949},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 13, want: 949},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 19, want: 1387},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 19, want: 1387},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 21, want: 1533},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 21, want: 1533},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 25, want: 1825},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 25, want: 1825},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 27, want: 1971},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 27, want: 1971},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 37, want: 2701},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 37, want: 2701},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 41, want: 2993},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 41, want: 2993},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 45, want: 3285},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 45, want: 3285},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 73, want: 5329},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 73, want: 5329},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 81, want: 5913},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 81, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 3, want: 243},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 3, want: 243},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 5, want: 405},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 5, want: 405},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 7, want: 567},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 7, want: 567},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 9, want: 729},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 9, want: 729},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 10, want: 810},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 10, want: 810},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 11, want: 891},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 11, want: 891},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 13, want: 1053},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 13, want: 1053},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 19, want: 1539},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 19, want: 1539},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 21, want: 1701},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 21, want: 1701},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 25, want: 2025},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 25, want: 2025},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 27, want: 2187},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 27, want: 2187},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 37, want: 2997},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 37, want: 2997},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 41, want: 3321},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 41, want: 3321},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 45, want: 3645},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 45, want: 3645},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 73, want: 5913},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 73, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 81, want: 6561},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 81, want: 6561}}
+
+type test_int32 struct {
+ fn func(int32) int32
+ fnname string
+ in int32
+ want int32
+}
+
+var tests_int32 = []test_int32{
+
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483647, want: 2},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483647, want: 2},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -1, want: -2147483648},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 1, want: -2147483646},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 1, want: -2147483646},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -1, want: -2},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -1, want: -2},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -1, want: -1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -1, want: -1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 0, want: 0},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 0, want: 0},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 1, want: 1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 1, want: 1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -1, want: 0},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -1, want: 0},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 0, want: 1},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 0, want: 1},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 1, want: 2},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 1, want: 2},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -1, want: 2147483646},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 1, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -1, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 1, want: 2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 2147483647, want: 1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -1, want: -2147483646},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 0, want: 2147483647},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 2147483647, want: 2},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483647, want: -2147483646},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 0, want: 1},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 1, want: 2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -1, want: 1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -1, want: -1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 0, want: 0},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 0, want: 0},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 1, want: -1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 1, want: 1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -1, want: 2},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -1, want: -2},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 0, want: 1},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 0, want: -1},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 1, want: 0},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 1, want: 0},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 2147483647, want: -2147483646},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483648, want: 1},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483647, want: 2},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 0, want: -2147483647},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 1, want: -2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483648, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 0, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 1, want: 1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 1, want: 1},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 1, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -1, want: 2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 0, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -1, want: -1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -1, want: -1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 0, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 1, want: 1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 1, want: 1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -1, want: -2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 2147483647, want: 2147483647},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483648, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 0, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 1, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -1, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 2147483647, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 1, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 0, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483647, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 0, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483648, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -1, want: 2147483647},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483648, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 2147483647, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -1, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 0, want: 0},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 0, want: 0},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 1, want: 1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -1, want: -1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 0, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 0, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 1, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 1, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -1, want: -1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -1, want: -1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 0, want: 0},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 0, want: 0},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 1, want: 1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 1, want: 1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -1, want: -2},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -1, want: -2},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 0, want: 1},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 0, want: 1},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 1, want: 0},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 1, want: 0},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483647, want: -2},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 1, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 2147483647, want: 0}}
+
+type test_int32mul struct {
+ fn     func(int32) int32
+ fnname string
+ in     int32
+ want   int32
+}
+
+var tests_int32mul = []test_int32{
+
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -9, want: 81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -9, want: 81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -5, want: 45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -5, want: 45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -3, want: 27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -3, want: 27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 3, want: -27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 3, want: -27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 5, want: -45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 5, want: -45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 7, want: -63},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 7, want: -63},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 9, want: -81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 9, want: -81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 10, want: -90},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 10, want: -90},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 11, want: -99},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 11, want: -99},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 13, want: -117},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 13, want: -117},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 19, want: -171},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 19, want: -171},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 21, want: -189},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 21, want: -189},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 25, want: -225},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 25, want: -225},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 27, want: -243},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 27, want: -243},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 37, want: -333},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 37, want: -333},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 41, want: -369},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 41, want: -369},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 45, want: -405},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 45, want: -405},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 73, want: -657},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 73, want: -657},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 81, want: -729},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 81, want: -729},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -9, want: 45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -9, want: 45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -5, want: 25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -5, want: 25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -3, want: 15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -3, want: 15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 3, want: -15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 3, want: -15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 5, want: -25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 5, want: -25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 7, want: -35},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 7, want: -35},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 9, want: -45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 9, want: -45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 10, want: -50},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 10, want: -50},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 11, want: -55},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 11, want: -55},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 13, want: -65},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 13, want: -65},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 19, want: -95},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 19, want: -95},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 21, want: -105},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 21, want: -105},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 25, want: -125},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 25, want: -125},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 27, want: -135},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 27, want: -135},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 37, want: -185},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 37, want: -185},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 41, want: -205},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 41, want: -205},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 45, want: -225},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 45, want: -225},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 73, want: -365},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 73, want: -365},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 81, want: -405},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 81, want: -405},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -9, want: 27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -9, want: 27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -5, want: 15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -5, want: 15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -3, want: 9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -3, want: 9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 3, want: -9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 3, want: -9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 5, want: -15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 5, want: -15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 7, want: -21},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 7, want: -21},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 9, want: -27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 9, want: -27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 10, want: -30},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 10, want: -30},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 11, want: -33},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 11, want: -33},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 13, want: -39},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 13, want: -39},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 19, want: -57},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 19, want: -57},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 21, want: -63},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 21, want: -63},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 25, want: -75},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 25, want: -75},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 27, want: -81},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 27, want: -81},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 37, want: -111},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 37, want: -111},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 41, want: -123},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 41, want: -123},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 45, want: -135},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 45, want: -135},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 73, want: -219},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 73, want: -219},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 81, want: -243},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 81, want: -243},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -9, want: -27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -9, want: -27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -5, want: -15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -5, want: -15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -3, want: -9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -3, want: -9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 3, want: 9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 3, want: 9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 5, want: 15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 5, want: 15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 7, want: 21},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 7, want: 21},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 9, want: 27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 9, want: 27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 10, want: 30},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 10, want: 30},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 11, want: 33},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 11, want: 33},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 13, want: 39},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 13, want: 39},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 19, want: 57},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 19, want: 57},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 21, want: 63},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 21, want: 63},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 25, want: 75},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 25, want: 75},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 27, want: 81},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 27, want: 81},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 37, want: 111},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 37, want: 111},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 41, want: 123},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 41, want: 123},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 45, want: 135},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 45, want: 135},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 73, want: 219},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 73, want: 219},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 81, want: 243},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 81, want: 243},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -9, want: -45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -9, want: -45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -5, want: -25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -5, want: -25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -3, want: -15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -3, want: -15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 3, want: 15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 3, want: 15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 5, want: 25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 5, want: 25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 7, want: 35},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 7, want: 35},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 9, want: 45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 9, want: 45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 10, want: 50},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 10, want: 50},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 11, want: 55},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 11, want: 55},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 13, want: 65},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 13, want: 65},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 19, want: 95},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 19, want: 95},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 21, want: 105},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 21, want: 105},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 25, want: 125},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 25, want: 125},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 27, want: 135},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 27, want: 135},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 37, want: 185},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 37, want: 185},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 41, want: 205},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 41, want: 205},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 45, want: 225},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 45, want: 225},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 73, want: 365},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 73, want: 365},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 81, want: 405},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 81, want: 405},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -9, want: -63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -9, want: -63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -5, want: -35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -5, want: -35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -3, want: -21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -3, want: -21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 3, want: 21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 3, want: 21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 5, want: 35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 5, want: 35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 7, want: 49},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 7, want: 49},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 9, want: 63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 9, want: 63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 10, want: 70},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 10, want: 70},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 11, want: 77},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 11, want: 77},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 13, want: 91},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 13, want: 91},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 19, want: 133},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 19, want: 133},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 21, want: 147},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 21, want: 147},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 25, want: 175},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 25, want: 175},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 27, want: 189},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 27, want: 189},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 37, want: 259},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 37, want: 259},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 41, want: 287},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 41, want: 287},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 45, want: 315},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 45, want: 315},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 73, want: 511},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 73, want: 511},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 81, want: 567},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 81, want: 567},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -9, want: -81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -9, want: -81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -5, want: -45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -5, want: -45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -3, want: -27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -3, want: -27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 3, want: 27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 3, want: 27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 5, want: 45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 5, want: 45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 7, want: 63},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 7, want: 63},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 9, want: 81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 9, want: 81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 10, want: 90},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 10, want: 90},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 11, want: 99},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 11, want: 99},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 13, want: 117},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 13, want: 117},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 19, want: 171},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 19, want: 171},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 21, want: 189},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 21, want: 189},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 25, want: 225},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 25, want: 225},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 27, want: 243},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 27, want: 243},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 37, want: 333},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 37, want: 333},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 41, want: 369},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 41, want: 369},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 45, want: 405},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 45, want: 405},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 73, want: 657},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 73, want: 657},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 81, want: 729},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 81, want: 729},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -9, want: -90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -9, want: -90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -5, want: -50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -5, want: -50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -3, want: -30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -3, want: -30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 3, want: 30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 3, want: 30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 5, want: 50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 5, want: 50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 7, want: 70},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 7, want: 70},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 9, want: 90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 9, want: 90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 10, want: 100},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 10, want: 100},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 11, want: 110},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 11, want: 110},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 13, want: 130},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 13, want: 130},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 19, want: 190},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 19, want: 190},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 21, want: 210},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 21, want: 210},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 25, want: 250},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 25, want: 250},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 27, want: 270},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 27, want: 270},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 37, want: 370},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 37, want: 370},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 41, want: 410},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 41, want: 410},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 45, want: 450},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 45, want: 450},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 73, want: 730},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 73, want: 730},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 81, want: 810},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 81, want: 810},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -9, want: -99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -9, want: -99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -5, want: -55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -5, want: -55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -3, want: -33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -3, want: -33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 3, want: 33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 3, want: 33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 5, want: 55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 5, want: 55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 7, want: 77},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 7, want: 77},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 9, want: 99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 9, want: 99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 10, want: 110},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 10, want: 110},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 11, want: 121},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 11, want: 121},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 13, want: 143},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 13, want: 143},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 19, want: 209},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 19, want: 209},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 21, want: 231},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 21, want: 231},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 25, want: 275},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 25, want: 275},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 27, want: 297},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 27, want: 297},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 37, want: 407},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 37, want: 407},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 41, want: 451},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 41, want: 451},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 45, want: 495},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 45, want: 495},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 73, want: 803},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 73, want: 803},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 81, want: 891},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 81, want: 891},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -9, want: -117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -9, want: -117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -5, want: -65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -5, want: -65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -3, want: -39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -3, want: -39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 3, want: 39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 3, want: 39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 5, want: 65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 5, want: 65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 7, want: 91},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 7, want: 91},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 9, want: 117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 9, want: 117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 10, want: 130},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 10, want: 130},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 11, want: 143},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 11, want: 143},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 13, want: 169},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 13, want: 169},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 19, want: 247},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 19, want: 247},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 21, want: 273},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 21, want: 273},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 25, want: 325},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 25, want: 325},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 27, want: 351},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 27, want: 351},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 37, want: 481},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 37, want: 481},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 41, want: 533},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 41, want: 533},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 45, want: 585},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 45, want: 585},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 73, want: 949},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 73, want: 949},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 81, want: 1053},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 81, want: 1053},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -9, want: -171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -9, want: -171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -5, want: -95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -5, want: -95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -3, want: -57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -3, want: -57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 3, want: 57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 3, want: 57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 5, want: 95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 5, want: 95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 7, want: 133},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 7, want: 133},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 9, want: 171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 9, want: 171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 10, want: 190},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 10, want: 190},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 11, want: 209},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 11, want: 209},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 13, want: 247},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 13, want: 247},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 19, want: 361},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 19, want: 361},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 21, want: 399},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 21, want: 399},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 25, want: 475},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 25, want: 475},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 27, want: 513},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 27, want: 513},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 37, want: 703},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 37, want: 703},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 41, want: 779},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 41, want: 779},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 45, want: 855},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 45, want: 855},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 73, want: 1387},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 73, want: 1387},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 81, want: 1539},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 81, want: 1539},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -9, want: -189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -9, want: -189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -5, want: -105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -5, want: -105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -3, want: -63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -3, want: -63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 3, want: 63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 3, want: 63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 5, want: 105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 5, want: 105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 7, want: 147},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 7, want: 147},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 9, want: 189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 9, want: 189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 10, want: 210},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 10, want: 210},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 11, want: 231},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 11, want: 231},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 13, want: 273},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 13, want: 273},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 19, want: 399},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 19, want: 399},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 21, want: 441},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 21, want: 441},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 25, want: 525},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 25, want: 525},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 27, want: 567},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 27, want: 567},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 37, want: 777},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 37, want: 777},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 41, want: 861},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 41, want: 861},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 45, want: 945},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 45, want: 945},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 73, want: 1533},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 73, want: 1533},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 81, want: 1701},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 81, want: 1701},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -9, want: -225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -9, want: -225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -5, want: -125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -5, want: -125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -3, want: -75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -3, want: -75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 3, want: 75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 3, want: 75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 5, want: 125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 5, want: 125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 7, want: 175},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 7, want: 175},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 9, want: 225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 9, want: 225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 10, want: 250},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 10, want: 250},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 11, want: 275},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 11, want: 275},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 13, want: 325},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 13, want: 325},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 19, want: 475},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 19, want: 475},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 21, want: 525},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 21, want: 525},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 25, want: 625},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 25, want: 625},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 27, want: 675},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 27, want: 675},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 37, want: 925},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 37, want: 925},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 41, want: 1025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 41, want: 1025},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 45, want: 1125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 45, want: 1125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 73, want: 1825},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 73, want: 1825},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 81, want: 2025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 81, want: 2025},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -9, want: -243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -9, want: -243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -5, want: -135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -5, want: -135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -3, want: -81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -3, want: -81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 3, want: 81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 3, want: 81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 5, want: 135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 5, want: 135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 7, want: 189},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 7, want: 189},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 9, want: 243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 9, want: 243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 10, want: 270},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 10, want: 270},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 11, want: 297},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 11, want: 297},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 13, want: 351},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 13, want: 351},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 19, want: 513},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 19, want: 513},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 21, want: 567},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 21, want: 567},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 25, want: 675},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 25, want: 675},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 27, want: 729},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 27, want: 729},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 37, want: 999},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 37, want: 999},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 41, want: 1107},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 41, want: 1107},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 45, want: 1215},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 45, want: 1215},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 73, want: 1971},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 73, want: 1971},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 81, want: 2187},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 81, want: 2187},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -9, want: -333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -9, want: -333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -5, want: -185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -5, want: -185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -3, want: -111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -3, want: -111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 3, want: 111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 3, want: 111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 5, want: 185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 5, want: 185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 7, want: 259},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 7, want: 259},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 9, want: 333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 9, want: 333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 10, want: 370},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 10, want: 370},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 11, want: 407},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 11, want: 407},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 13, want: 481},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 13, want: 481},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 19, want: 703},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 19, want: 703},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 21, want: 777},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 21, want: 777},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 25, want: 925},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 25, want: 925},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 27, want: 999},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 27, want: 999},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 37, want: 1369},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 37, want: 1369},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 41, want: 1517},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 41, want: 1517},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 45, want: 1665},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 45, want: 1665},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 73, want: 2701},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 73, want: 2701},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 81, want: 2997},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 81, want: 2997},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -9, want: -369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -9, want: -369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -5, want: -205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -5, want: -205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -3, want: -123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -3, want: -123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 3, want: 123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 3, want: 123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 5, want: 205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 5, want: 205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 7, want: 287},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 7, want: 287},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 9, want: 369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 9, want: 369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 10, want: 410},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 10, want: 410},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 11, want: 451},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 11, want: 451},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 13, want: 533},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 13, want: 533},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 19, want: 779},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 19, want: 779},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 21, want: 861},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 21, want: 861},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 25, want: 1025},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 25, want: 1025},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 27, want: 1107},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 27, want: 1107},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 37, want: 1517},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 37, want: 1517},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 41, want: 1681},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 41, want: 1681},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 45, want: 1845},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 45, want: 1845},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 73, want: 2993},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 73, want: 2993},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 81, want: 3321},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 81, want: 3321},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -9, want: -405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -9, want: -405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -5, want: -225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -5, want: -225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -3, want: -135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -3, want: -135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 3, want: 135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 3, want: 135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 5, want: 225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 5, want: 225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 7, want: 315},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 7, want: 315},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 9, want: 405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 9, want: 405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 10, want: 450},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 10, want: 450},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 11, want: 495},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 11, want: 495},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 13, want: 585},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 13, want: 585},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 19, want: 855},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 19, want: 855},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 21, want: 945},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 21, want: 945},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 25, want: 1125},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 25, want: 1125},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 27, want: 1215},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 27, want: 1215},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 37, want: 1665},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 37, want: 1665},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 41, want: 1845},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 41, want: 1845},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 45, want: 2025},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 45, want: 2025},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 73, want: 3285},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 73, want: 3285},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 81, want: 3645},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 81, want: 3645},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -9, want: -657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -9, want: -657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -5, want: -365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -5, want: -365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -3, want: -219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -3, want: -219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 3, want: 219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 3, want: 219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 5, want: 365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 5, want: 365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 7, want: 511},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 7, want: 511},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 9, want: 657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 9, want: 657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 10, want: 730},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 10, want: 730},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 11, want: 803},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 11, want: 803},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 13, want: 949},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 13, want: 949},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 19, want: 1387},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 19, want: 1387},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 21, want: 1533},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 21, want: 1533},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 25, want: 1825},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 25, want: 1825},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 27, want: 1971},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 27, want: 1971},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 37, want: 2701},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 37, want: 2701},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 41, want: 2993},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 41, want: 2993},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 45, want: 3285},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 45, want: 3285},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 73, want: 5329},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 73, want: 5329},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 81, want: 5913},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 81, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -9, want: -729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -9, want: -729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -5, want: -405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -5, want: -405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -3, want: -243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -3, want: -243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 3, want: 243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 3, want: 243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 5, want: 405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 5, want: 405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 7, want: 567},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 7, want: 567},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 9, want: 729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 9, want: 729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 10, want: 810},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 10, want: 810},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 11, want: 891},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 11, want: 891},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 13, want: 1053},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 13, want: 1053},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 19, want: 1539},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 19, want: 1539},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 21, want: 1701},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 21, want: 1701},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 25, want: 2025},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 25, want: 2025},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 27, want: 2187},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 27, want: 2187},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 37, want: 2997},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 37, want: 2997},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 41, want: 3321},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 41, want: 3321},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 45, want: 3645},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 45, want: 3645},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 73, want: 5913},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 73, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 81, want: 6561},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 81, want: 6561}}
+
+type test_uint16 struct {
+ fn     func(uint16) uint16
+ fnname string
+ in     uint16
+ want   uint16
+}
+
+var tests_uint16 = []test_uint16{
+
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 0, want: 0},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 0, want: 0},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 1, want: 1},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 1, want: 1},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 0, want: 1},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 0, want: 1},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 1, want: 2},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 1, want: 2},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 65535, want: 65534},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 65535, want: 65534},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 0, want: 0},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 0, want: 0},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 1, want: 65535},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 1, want: 1},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 65535, want: 1},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 0, want: 1},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 0, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 1, want: 0},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 1, want: 0},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 65535, want: 2},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 0, want: 1},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 1, want: 2},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 1, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 0, want: 0},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 1, want: 1},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 1, want: 1},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 0, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 1, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 1, want: 1},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 1, want: 1},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 1, want: 2},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 1, want: 2},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 1, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 65535, want: 32767},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 1, want: 32767},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 1, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 0, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 1, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 1, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 0, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 1, want: 1},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 65535, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 0, want: 0},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 0, want: 0},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 1, want: 1},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 0, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 0, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 1, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 0, want: 0},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 0, want: 0},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 1, want: 1},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 1, want: 1},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 0, want: 1},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 0, want: 1},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 1, want: 0},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 1, want: 0},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 65535, want: 65534},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 1, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 65535, want: 0}}
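The fn/fnname/in/want tables above are presumably consumed by a table-driven driver later in this generated file; a minimal sketch of such a driver for tests_uint16 follows (the function name and failure-message format here are illustrative assumptions, not part of this diff):

// Sketch only, not part of the generated diff: iterate the uint16 table and
// report any mismatch between the constant-folded result and the expected value.
func testConstFoldUint16(t *testing.T) {
	for _, test := range tests_uint16 {
		if got := test.fn(test.in); got != test.want {
			t.Errorf("%s(%d) = %d, want %d", test.fnname, test.in, got, test.want)
		}
	}
}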
+
+type test_int16 struct {
+ fn func(int16) int16
+ fnname string
+ in int16
+ want int16
+}
+
+var tests_int16 = []test_int16{
+
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32767, want: 2},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32767, want: 2},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -1, want: -32768},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -1, want: -32768},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 1, want: -32766},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 1, want: -32766},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32767, want: -32768},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32767, want: -32768},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -1, want: -2},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -1, want: -2},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32766, want: 32765},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32766, want: 32765},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32767, want: 32766},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32767, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32768, want: -32768},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32768, want: -32768},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32767, want: -32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32767, want: -32767},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -1, want: -1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -1, want: -1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 0, want: 0},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 0, want: 0},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 1, want: 1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 1, want: 1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32766, want: 32766},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32766, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32767, want: 32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32767, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32768, want: -32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32768, want: -32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32767, want: -32766},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32767, want: -32766},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -1, want: 0},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -1, want: 0},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 0, want: 1},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 0, want: 1},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 1, want: 2},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 1, want: 2},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32766, want: 32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32766, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32767, want: -32768},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32767, want: -32768},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32768, want: -2},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32768, want: -2},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32767, want: -1},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32767, want: -1},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -1, want: 32765},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -1, want: 32765},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 0, want: 32766},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 0, want: 32766},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 1, want: 32767},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 1, want: 32767},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32766, want: -4},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32766, want: -4},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32767, want: -3},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32767, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32768, want: -1},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32768, want: -1},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32767, want: 0},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32767, want: 0},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -1, want: 32766},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -1, want: 32766},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 0, want: 32767},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 0, want: 32767},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 1, want: -32768},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 1, want: -32768},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32766, want: -3},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32766, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32767, want: -2},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -1, want: -32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 1, want: 32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32766, want: 2},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32767, want: 1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -1, want: -32766},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 0, want: 32767},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32766, want: 3},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32766, want: -3},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32767, want: 2},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32768, want: -32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32767, want: -32766},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 0, want: 1},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 1, want: 2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32766, want: 32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32768, want: -32768},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32768, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32767, want: 32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32767, want: -32767},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -1, want: 1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -1, want: -1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 0, want: 0},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 0, want: 0},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 1, want: -1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 1, want: 1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32766, want: -32766},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32766, want: 32766},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32767, want: -32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32767, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32768, want: -32767},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32768, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32767, want: -32768},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32767, want: -32768},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -1, want: 2},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -1, want: -2},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 0, want: 1},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 0, want: -1},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 1, want: 0},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 1, want: 0},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32766, want: -32765},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32766, want: 32765},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32767, want: -32766},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32767, want: 32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32768, want: -2},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32768, want: 2},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32767, want: -3},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32767, want: 3},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -1, want: 32767},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -1, want: -32767},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 0, want: 32766},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 0, want: -32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 1, want: 32765},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 1, want: -32765},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32766, want: 0},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32766, want: 0},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32767, want: -1},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32767, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32768, want: -1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32768, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32767, want: -2},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32767, want: 2},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -1, want: -32768},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -1, want: -32768},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 0, want: 32767},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 0, want: -32767},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 1, want: 32766},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 1, want: -32766},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32766, want: 1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32766, want: -1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32767, want: 0},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32768, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32768, want: 1},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 1, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32768, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32767, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32766, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32767, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32768, want: -32768},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32767, want: -32767},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 0, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 1, want: 1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 1, want: 1},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32766, want: 32766},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32767, want: 32767},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32768, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32767, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -1, want: -32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -1, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 0, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 1, want: 32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 1, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32766, want: 1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32767, want: 1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32768, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32767, want: -1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32767, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -1, want: -32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -1, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 0, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 1, want: 32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 1, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32766, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32767, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32767, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -1, want: 32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32767, want: 32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32767, want: -32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32768, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32767, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 0, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32766, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32767, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32768, want: -32768},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32767, want: -32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32767, want: -32767},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -1, want: -1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -1, want: -1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 0, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 1, want: 1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 1, want: 1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32766, want: 32766},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32767, want: 32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32767, want: 32767},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32768, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32767, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -1, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -1, want: -32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 0, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 1, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 1, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32766, want: 4},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32766, want: 4},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32767, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32767, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32768, want: -32768},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32767, want: -1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32767, want: -1},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -1, want: -32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -1, want: -32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 0, want: 0},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 1, want: 32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 1, want: 32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32766, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32767, want: 1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 1, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32767, want: 32767},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32768, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32766, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32768, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32766, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32768, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32768, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 0, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 1, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32766, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32767, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32768, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32768, want: -2},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32767, want: -1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -1, want: -1},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 0, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 1, want: 1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32766, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32766, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32767, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32768, want: 32767},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32768, want: -1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32767, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 0, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 1, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32767, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -1, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -1, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32767, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32767, want: -32767},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 1, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32766, want: 32766},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32767, want: 32767},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32768, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32767, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 0, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32766, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32767, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32768, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32767, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 0, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32766, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32767, want: 1},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32768, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32767, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -1, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -1, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 0, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 1, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32767, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32768, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32767, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -1, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -1, want: 32767},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 0, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 1, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32768, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32768, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32766, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32767, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32768, want: -32768},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32767, want: -32767},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -1, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 0, want: 0},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 0, want: 0},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 1, want: 1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32766, want: 32766},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32767, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32768, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32767, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -1, want: -1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 0, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 0, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 1, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32766, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32767, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32768, want: -2},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32768, want: -2},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32767, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -1, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 0, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 0, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 1, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32767, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32768, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32767, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -1, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 0, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 0, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 1, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32766, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -1, want: 32766},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32767, want: -2},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32767, want: 32766},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 1, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32766, want: -32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32768, want: -32768},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32768, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32767, want: -32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32767, want: -32767},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -1, want: -1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -1, want: -1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 0, want: 0},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 0, want: 0},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 1, want: 1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 1, want: 1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32766, want: 32766},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32766, want: 32766},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32767, want: 32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32767, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32768, want: -32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32768, want: -32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32767, want: -32768},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32767, want: -32768},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -1, want: -2},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -1, want: -2},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 0, want: 1},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 0, want: 1},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 1, want: 0},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 1, want: 0},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32766, want: 32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32766, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32767, want: 32766},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32767, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32768, want: -2},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32768, want: -2},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32767, want: -1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32767, want: -1},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -1, want: -32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -1, want: -32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 0, want: 32766},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 0, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 1, want: 32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 1, want: 32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32766, want: 0},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32766, want: 0},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32767, want: 1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32767, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32768, want: -1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32768, want: -1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32767, want: -2},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32767, want: -2},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -1, want: -32768},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -1, want: -32768},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 0, want: 32767},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 0, want: 32767},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 1, want: 32766},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 1, want: 32766},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32766, want: 1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32766, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32767, want: 0},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32767, want: 0}}
+
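+// test_uint8 describes one constant-folding case for uint8: fn wraps a uint8
+// operation with one constant operand, fnname is that function's textual name,
+// in is the variable operand, and want is the expected result.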
+type test_uint8 struct {
+ fn func(uint8) uint8
+ fnname string
+ in uint8
+ want uint8
+}
+
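+// tests_uint8 pairs each uint8 operator/constant function with the inputs
+// 0, 1, and 255 and the value the folded expression must produce.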
+var tests_uint8 = []test_uint8{
+
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 0, want: 0},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 0, want: 0},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 1, want: 1},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 1, want: 1},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 255, want: 255},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 255, want: 255},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 0, want: 1},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 0, want: 1},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 1, want: 2},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 1, want: 2},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 255, want: 0},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 255, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 0, want: 255},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 0, want: 255},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 1, want: 0},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 1, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 255, want: 254},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 255, want: 254},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 0, want: 0},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 0, want: 0},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 1, want: 255},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 1, want: 1},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 255, want: 1},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 255, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 0, want: 1},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 0, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 1, want: 0},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 1, want: 0},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 255, want: 2},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 255, want: 254},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 0, want: 255},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 0, want: 1},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 1, want: 254},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 1, want: 2},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 255, want: 0},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 255, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 1, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 0, want: 0},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 1, want: 1},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 1, want: 1},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 255, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 0, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 1, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 1, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 255, want: 1},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 255, want: 1},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 0, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 1, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 255, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 1, want: 1},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 1, want: 1},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 255, want: 255},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 255, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 1, want: 255},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 1, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 255, want: 1},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 255, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 1, want: 2},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 1, want: 2},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 255, want: 254},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 1, want: 254},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 1, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 255, want: 127},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 1, want: 127},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 1, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 255, want: 1},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 1, want: 1},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 255, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 0, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 1, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 1, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 255, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 255, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 0, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 1, want: 1},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 255, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 255, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 0, want: 0},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 1, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 255, want: 255},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 255, want: 255},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 0, want: 0},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 0, want: 0},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 1, want: 1},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 255, want: 255},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 0, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 0, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 1, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 255, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 0, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 0, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 1, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 1, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 255, want: 255},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 0, want: 0},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 0, want: 0},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 1, want: 1},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 1, want: 1},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 255, want: 255},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 255, want: 255},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 0, want: 1},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 0, want: 1},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 1, want: 0},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 1, want: 0},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 255, want: 254},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 255, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 0, want: 255},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 0, want: 255},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 1, want: 254},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 1, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 255, want: 0},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 255, want: 0}}
+
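+// test_int8 mirrors test_uint8 for int8 operations: fn applies an int8
+// operation with one constant operand to in, and want is the expected result.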
+type test_int8 struct {
+ fn func(int8) int8
+ fnname string
+ in int8
+ want int8
+}
+
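+// tests_int8 exercises the int8 boundary inputs (-128, -127, -1, 0, 1, 126, 127),
+// including wrapping cases such as -128 / -1 and -128 * -1 yielding -128.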
+var tests_int8 = []test_int8{
+
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -127, want: 2},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -127, want: 2},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -1, want: -128},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -1, want: -128},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 1, want: -126},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 1, want: -126},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -127, want: -128},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -127, want: -128},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -1, want: -2},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -1, want: -2},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 126, want: 125},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 126, want: 125},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 127, want: 126},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 127, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -128, want: -128},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -128, want: -128},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -127, want: -127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -127, want: -127},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -1, want: -1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -1, want: -1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 0, want: 0},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 0, want: 0},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 1, want: 1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 1, want: 1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 126, want: 126},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 126, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 127, want: 127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 127, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -128, want: -127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -128, want: -127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -127, want: -126},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -127, want: -126},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -1, want: 0},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -1, want: 0},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 0, want: 1},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 0, want: 1},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 1, want: 2},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 1, want: 2},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 126, want: 127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 126, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 127, want: -128},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 127, want: -128},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -128, want: -2},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -128, want: -2},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -127, want: -1},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -127, want: -1},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -1, want: 125},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -1, want: 125},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 0, want: 126},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 0, want: 126},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 1, want: 127},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 1, want: 127},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 126, want: -4},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 126, want: -4},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 127, want: -3},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 127, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -128, want: -1},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -128, want: -1},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -127, want: 0},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -127, want: 0},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -1, want: 126},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -1, want: 126},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 0, want: 127},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 0, want: 127},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 1, want: -128},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 1, want: -128},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 126, want: -3},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 126, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 127, want: -2},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 127, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -1, want: -127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 1, want: 127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 126, want: 2},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 127, want: 1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -1, want: -126},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 0, want: 127},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 126, want: 3},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 126, want: -3},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 127, want: 2},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -128, want: -127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -127, want: -126},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 0, want: 1},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 1, want: 2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 126, want: 127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -128, want: -128},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -128, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -127, want: 127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -127, want: -127},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -1, want: 1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -1, want: -1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 0, want: 0},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 0, want: 0},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 1, want: -1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 1, want: 1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 126, want: -126},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 126, want: 126},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 127, want: -127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 127, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -128, want: -127},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -128, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -127, want: -128},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -127, want: -128},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -1, want: 2},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -1, want: -2},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 0, want: 1},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 0, want: -1},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 1, want: 0},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 1, want: 0},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 126, want: -125},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 126, want: 125},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 127, want: -126},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 127, want: 126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -128, want: -2},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -128, want: 2},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -127, want: -3},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -127, want: 3},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -1, want: 127},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -1, want: -127},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 0, want: 126},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 0, want: -126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 1, want: 125},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 1, want: -125},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 126, want: 0},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 126, want: 0},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 127, want: -1},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 127, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -128, want: -1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -128, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -127, want: -2},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -127, want: 2},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -1, want: -128},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -1, want: -128},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 0, want: 127},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 0, want: -127},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 1, want: 126},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 1, want: -126},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 126, want: 1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 126, want: -1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 127, want: 0},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -128, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -128, want: 1},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 1, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -128, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -127, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 126, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 127, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -128, want: -128},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -127, want: -127},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 0, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 1, want: 1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 1, want: 1},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 126, want: 126},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 127, want: 127},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -128, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -127, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -1, want: -126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -1, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 0, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 1, want: 126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 1, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 126, want: 1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 127, want: 1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -128, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -127, want: -1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -127, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -1, want: -127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -1, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 0, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 1, want: 127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 1, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 126, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 127, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 127, want: 1},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 127, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -1, want: 127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -127, want: 127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 127, want: -127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -128, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -127, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 0, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 126, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 127, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -128, want: -128},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -127, want: -127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -127, want: -127},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -1, want: -1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -1, want: -1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 0, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 1, want: 1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 1, want: 1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 126, want: 126},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 127, want: 127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 127, want: 127},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -128, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -127, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -127, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -1, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -1, want: -126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 0, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 1, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 1, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 126, want: 4},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 126, want: 4},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 127, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 127, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -128, want: -128},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -127, want: -1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -127, want: -1},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -1, want: -127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -1, want: -127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 0, want: 0},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 1, want: 127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 1, want: 127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 126, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 127, want: 1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 127, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 1, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 126, want: 126},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 127, want: 127},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -128, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 126, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -128, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 126, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -128, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -128, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 0, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 1, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 126, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 127, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -128, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -128, want: -2},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -127, want: -1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -1, want: -1},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 0, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 1, want: 1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 126, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 126, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 127, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -128, want: 127},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -128, want: -1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -127, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -1, want: -1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 0, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 1, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 126, want: 126},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 127, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -1, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -1, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 127, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -127, want: -127},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 1, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 126, want: 126},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 127, want: 127},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -128, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -127, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 0, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 126, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 127, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -128, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -127, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 0, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 126, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 127, want: 1},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -128, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -127, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -1, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -1, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 0, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 1, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 126, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 127, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 127, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -128, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -127, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -1, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -1, want: 127},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 0, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 1, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 126, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 127, want: 127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -128, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -128, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 126, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 127, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -128, want: -128},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -127, want: -127},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -1, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 0, want: 0},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 0, want: 0},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 1, want: 1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 126, want: 126},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 127, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -128, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -127, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -1, want: -1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 0, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 0, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 1, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 126, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 127, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -128, want: -2},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -128, want: -2},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -127, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -1, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 0, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 0, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 1, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 126, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 127, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -128, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -127, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -1, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 0, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 0, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 1, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 126, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 127, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -1, want: 126},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 127, want: -2},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -127, want: 126},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 1, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 126, want: -127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -128, want: -128},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -128, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -127, want: -127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -127, want: -127},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -1, want: -1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -1, want: -1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 0, want: 0},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 0, want: 0},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 1, want: 1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 1, want: 1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 126, want: 126},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 126, want: 126},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 127, want: 127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 127, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -128, want: -127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -128, want: -127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -127, want: -128},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -127, want: -128},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -1, want: -2},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -1, want: -2},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 0, want: 1},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 0, want: 1},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 1, want: 0},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 1, want: 0},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 126, want: 127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 126, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 127, want: 126},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 127, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -128, want: -2},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -128, want: -2},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -127, want: -1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -127, want: -1},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -1, want: -127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -1, want: -127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 0, want: 126},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 0, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 1, want: 127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 1, want: 127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 126, want: 0},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 126, want: 0},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 127, want: 1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 127, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -128, want: -1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -128, want: -1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -127, want: -2},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -127, want: -2},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -1, want: -128},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -1, want: -128},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 0, want: 127},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 0, want: 127},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 1, want: 126},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 1, want: 126},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 126, want: 1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 126, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 127, want: 0},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 127, want: 0}}
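+
+// Illustrative sketch, not part of the generated table above: the expected
+// values follow from 8-bit two's-complement wrap-around. For example,
+// 126*126 = 15876 and 15876 mod 256 = 4, matching the want for mul_126_int8
+// with in: 126; likewise 127*127 = 16129 wraps to 1. A hypothetical helper
+// making the wrap explicit:
+func wrapMulInt8(a, b int8) int8 {
+	// Go int8 multiplication wraps modulo 256, so wrapMulInt8(126, 126) == 4.
+	return a * b
+}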
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+ for _, test := range tests_uint64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+
+}
diff --git a/src/cmd/compile/internal/gc/testdata/arith_test.go b/src/cmd/compile/internal/gc/testdata/arith_test.go
new file mode 100644
index 0000000..158fedc
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/arith_test.go
@@ -0,0 +1,1454 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests arithmetic expressions
+
+package main
+
+import (
+ "math"
+ "runtime"
+ "testing"
+)
+
+const (
+ y = 0x0fffFFFF
+)
+
+var (
+ g8 int8
+ g16 int16
+ g32 int32
+ g64 int64
+)
+
+//go:noinline
+func lshNop1(x uint64) uint64 {
+ // two outer shifts should be removed
+ return (((x << 5) >> 2) << 2)
+}
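+
+// A minimal sketch, not in the original change, of why the outer shift pair
+// in lshNop1 cancels: x<<5 already has its low five bits clear, so the >>2
+// discards only zero bits and the following <<2 restores them exactly.
+// lshNop1 should therefore compile down to the single shift below.
+//go:noinline
+func lshNop1Simplified(x uint64) uint64 {
+	// Same value as lshNop1 for every x.
+	return x << 5
+}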
+
+//go:noinline
+func lshNop2(x uint64) uint64 {
+ return (((x << 5) >> 2) << 3)
+}
+
+//go:noinline
+func lshNop3(x uint64) uint64 {
+ return (((x << 5) >> 2) << 6)
+}
+
+//go:noinline
+func lshNotNop(x uint64) uint64 {
+ // outer shift can't be removed
+ return (((x << 5) >> 2) << 1)
+}
+
+//go:noinline
+func rshNop1(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 2)
+}
+
+//go:noinline
+func rshNop2(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 3)
+}
+
+//go:noinline
+func rshNop3(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 6)
+}
+
+//go:noinline
+func rshNotNop(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 1)
+}
+
+func testShiftRemoval(t *testing.T) {
+ allSet := ^uint64(0)
+ if want, got := uint64(0x7ffffffffffffff), rshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x3ffffffffffffff), rshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7fffffffffffff), rshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe), rshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNotNop failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe0), lshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffc0), lshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xfffffffffffffe00), lshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7ffffffffffffff0), lshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNotNop failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func parseLE64(b []byte) uint64 {
+ // skip the first two bytes, and parse the remaining 8 as a uint64
+ return uint64(b[2]) | uint64(b[3])<<8 | uint64(b[4])<<16 | uint64(b[5])<<24 |
+ uint64(b[6])<<32 | uint64(b[7])<<40 | uint64(b[8])<<48 | uint64(b[9])<<56
+}
+
+//go:noinline
+func parseLE32(b []byte) uint32 {
+ return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[4])<<16 | uint32(b[5])<<24
+}
+
+//go:noinline
+func parseLE16(b []byte) uint16 {
+ return uint16(b[2]) | uint16(b[3])<<8
+}
+
+// testLoadCombine tests for issue #14694 where load combining didn't respect the pointer offset.
+func testLoadCombine(t *testing.T) {
+ testData := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}
+ if want, got := uint64(0x0908070605040302), parseLE64(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x05040302), parseLE32(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint16(0x0302), parseLE16(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+}
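+
+// A sanity note on the expected values above (illustrative, not part of the
+// original change): the parseLE* helpers start at offset 2, so the bytes
+// 0x02..0x09 assemble little-endian into 0x0908070605040302, 0x05040302 and
+// 0x0302. The point of issue #14694 is that when the byte loads are combined
+// into a single wide load, that +2 offset must be preserved.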
+
+var loadSymData = [...]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+
+func testLoadSymCombine(t *testing.T) {
+ w2 := uint16(0x0201)
+ g2 := uint16(loadSymData[0]) | uint16(loadSymData[1])<<8
+ if g2 != w2 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w2, g2)
+ }
+ w4 := uint32(0x04030201)
+ g4 := uint32(loadSymData[0]) | uint32(loadSymData[1])<<8 |
+ uint32(loadSymData[2])<<16 | uint32(loadSymData[3])<<24
+ if g4 != w4 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w4, g4)
+ }
+ w8 := uint64(0x0807060504030201)
+ g8 := uint64(loadSymData[0]) | uint64(loadSymData[1])<<8 |
+ uint64(loadSymData[2])<<16 | uint64(loadSymData[3])<<24 |
+ uint64(loadSymData[4])<<32 | uint64(loadSymData[5])<<40 |
+ uint64(loadSymData[6])<<48 | uint64(loadSymData[7])<<56
+ if g8 != w8 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w8, g8)
+ }
+}
+
+//go:noinline
+func invalidAdd_ssa(x uint32) uint32 {
+ return x + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y
+}
+
+//go:noinline
+func invalidSub_ssa(x uint32) uint32 {
+ return x - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y
+}
+
+//go:noinline
+func invalidMul_ssa(x uint32) uint32 {
+ return x * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y
+}
+
+// testLargeConst tests a situation where constants larger than 32 bits were passed to ADDL,
+// causing an invalid instruction error.
+func testLargeConst(t *testing.T) {
+ if want, got := uint32(268435440), invalidAdd_ssa(1); want != got {
+ t.Errorf("testLargeConst add failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(4026531858), invalidSub_ssa(1); want != got {
+ t.Errorf("testLargeConst sub failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(268435455), invalidMul_ssa(1); want != got {
+ t.Errorf("testLargeConst mul failed, wanted %d got %d", want, got)
+ }
+}
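+
+// Worked example for the first expected value above (a sketch, not part of
+// the original change): y = 0x0fffFFFF = 268435455, so invalidAdd_ssa(1)
+// computes 1 + 17*y = 4563402736, which wraps modulo 2^32 to 268435440.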
+
+// testArithRshConst ensures that "const >> const" right shifts correctly perform
+// sign extension on the lhs constant.
+func testArithRshConst(t *testing.T) {
+ wantu := uint64(0x4000000000000000)
+ if got := arithRshuConst_ssa(); got != wantu {
+ t.Errorf("arithRshuConst failed, wanted %d got %d", wantu, got)
+ }
+
+ wants := int64(-0x4000000000000000)
+ if got := arithRshConst_ssa(); got != wants {
+ t.Errorf("arithRshConst failed, wanted %d got %d", wants, got)
+ }
+}
+
+//go:noinline
+func arithRshuConst_ssa() uint64 {
+ y := uint64(0x8000000000000001)
+ z := uint64(1)
+ return uint64(y >> z)
+}
+
+//go:noinline
+func arithRshConst_ssa() int64 {
+ y := int64(-0x8000000000000000)
+ z := uint64(1)
+ return int64(y >> z)
+}
+
+//go:noinline
+func arithConstShift_ssa(x int64) int64 {
+ return x >> 100
+}
+
+// testArithConstShift tests that right shifts by large constants preserve
+// the sign of the input.
+func testArithConstShift(t *testing.T) {
+ want := int64(-1)
+ if got := arithConstShift_ssa(-1); want != got {
+ t.Errorf("arithConstShift_ssa(-1) failed, wanted %d got %d", want, got)
+ }
+ want = 0
+ if got := arithConstShift_ssa(1); want != got {
+ t.Errorf("arithConstShift_ssa(1) failed, wanted %d got %d", want, got)
+ }
+}
+
+// The overflowConstShift*_ssa functions verify that constant folding for shifts
+// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0).
+//go:noinline
+func overflowConstShift64_ssa(x int64) int64 {
+ return x << uint64(0xffffffffffffffff) << uint64(1)
+}
+
+//go:noinline
+func overflowConstShift32_ssa(x int64) int32 {
+ return int32(x) << uint32(0xffffffff) << uint32(1)
+}
+
+//go:noinline
+func overflowConstShift16_ssa(x int64) int16 {
+ return int16(x) << uint16(0xffff) << uint16(1)
+}
+
+//go:noinline
+func overflowConstShift8_ssa(x int64) int8 {
+ return int8(x) << uint8(0xff) << uint8(1)
+}
+
+func testOverflowConstShift(t *testing.T) {
+ want := int64(0)
+ for x := int64(-127); x < int64(127); x++ {
+ got := overflowConstShift64_ssa(x)
+ if want != got {
+ t.Errorf("overflowShift64 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift32_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift32 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift16_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift16 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift8_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift8 failed, wanted %d got %d", want, got)
+ }
+ }
+}
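+
+// Why the expected value is 0 (a sketch of the reasoning, not part of the
+// original change): in Go a shift count at least as large as the operand
+// width yields 0, so x << 0xffffffffffffffff is already 0 and the extra << 1
+// keeps it 0. Folding the two shifts by adding their counts would wrap the
+// count back to 0 and incorrectly produce x << 0 == x, which is exactly what
+// these tests guard against.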
+
+// test64BitConstMult tests that rewrite rules don't fold 64-bit constants
+// into multiply instructions.
+func test64BitConstMult(t *testing.T) {
+ want := int64(103079215109)
+ if got := test64BitConstMult_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstMult failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstMult_ssa(a, b int64) int64 {
+ return 34359738369*a + b*34359738370
+}
+
+// test64BitConstAdd tests that rewrite rules don't fold 64-bit constants
+// into add instructions.
+func test64BitConstAdd(t *testing.T) {
+ want := int64(3567671782835376650)
+ if got := test64BitConstAdd_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstAdd failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstAdd_ssa(a, b int64) int64 {
+ return a + 575815584948629622 + b + 2991856197886747025
+}
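+
+// The expected constants in the two tests above can be checked by hand
+// (illustrative note, not part of the original change):
+// 34359738369*1 + 2*34359738370 = 34359738369 + 68719476740 = 103079215109,
+// and 1 + 575815584948629622 + 2 + 2991856197886747025 = 3567671782835376650.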
+
+// testRegallocCVSpill tests that regalloc spills a value whose last use is the
+// current value.
+func testRegallocCVSpill(t *testing.T) {
+ want := int8(-9)
+ if got := testRegallocCVSpill_ssa(1, 2, 3, 4); want != got {
+ t.Errorf("testRegallocCVSpill failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testRegallocCVSpill_ssa(a, b, c, d int8) int8 {
+ return a + -32 + b + 63*c*-87*d
+}
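+
+// Worked value for the check above (a sketch, not part of the original
+// change): with a, b, c, d = 1, 2, 3, 4, the expression evaluates to
+// 1 - 32 + 2 + 63*3*(-87)*4 = -65801, which is congruent to -9 modulo 256,
+// hence the int8 result -9.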
+
+func testBitwiseLogic(t *testing.T) {
+ a, b := uint32(57623283), uint32(1314713839)
+ if want, got := uint32(38551779), testBitwiseAnd_ssa(a, b); want != got {
+ t.Errorf("testBitwiseAnd failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1333785343), testBitwiseOr_ssa(a, b); want != got {
+ t.Errorf("testBitwiseOr failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1295233564), testBitwiseXor_ssa(a, b); want != got {
+ t.Errorf("testBitwiseXor failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(832), testBitwiseLsh_ssa(13, 4, 2); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-13), testBitwiseRsh_ssa(-832, 4, 2); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseRsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-1), testBitwiseRsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x3ffffff), testBitwiseRshU_ssa(0xffffffff, 4, 2); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(0x8aaaaaaa, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testBitwiseAnd_ssa(a, b uint32) uint32 {
+ return a & b
+}
+
+//go:noinline
+func testBitwiseOr_ssa(a, b uint32) uint32 {
+ return a | b
+}
+
+//go:noinline
+func testBitwiseXor_ssa(a, b uint32) uint32 {
+ return a ^ b
+}
+
+//go:noinline
+func testBitwiseLsh_ssa(a int32, b, c uint32) int32 {
+ return a << b << c
+}
+
+//go:noinline
+func testBitwiseRsh_ssa(a int32, b, c uint32) int32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testBitwiseRshU_ssa(a uint32, b, c uint32) uint32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testShiftCX_ssa() int {
+ v1 := uint8(3)
+ v4 := (v1 * v1) ^ v1 | v1 - v1 - v1&v1 ^ uint8(3+2) + v1*1>>0 - v1 | 1 | v1<<(2*3|0-0*0^1)
+ v5 := v4>>(3-0-uint(3)) | v1 | v1 + v1 ^ v4<<(0+1|3&1)<<(uint64(1)<<0*2*0<<0) ^ v1
+ v6 := v5 ^ (v1+v1)*v1 | v1 | v1*v1>>(v1&v1)>>(uint(1)<<0*uint(3)>>1)*v1<<2*v1<<v1 - v1>>2 | (v4 - v1) ^ v1 + v1 ^ v1>>1 | v1 + v1 - v1 ^ v1
+ v7 := v6 & v5 << 0
+ v1++
+ v11 := 2&1 ^ 0 + 3 | int(0^0)<<1>>(1*0*3) ^ 0*0 ^ 3&0*3&3 ^ 3*3 ^ 1 ^ int(2)<<(2*3) + 2 | 2 | 2 ^ 2 + 1 | 3 | 0 ^ int(1)>>1 ^ 2 // int
+ v7--
+ return int(uint64(2*1)<<(3-2)<<uint(3>>v7)-2)&v11 | v11 - int(2)<<0>>(2-1)*(v11*0&v11<<1<<(uint8(2)+v4))
+}
+
+func testShiftCX(t *testing.T) {
+ want := 141
+ if got := testShiftCX_ssa(); want != got {
+ t.Errorf("testShiftCX failed, wanted %d got %d", want, got)
+ }
+}
+
+// testSubqToNegq ensures that the SUBQ -> NEGQ translation works correctly.
+func testSubqToNegq(t *testing.T) {
+ want := int64(-318294940372190156)
+ if got := testSubqToNegq_ssa(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2); want != got {
+ t.Errorf("testSubqToNegq failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testSubqToNegq_ssa(a, b, c, d, e, f, g, h, i, j, k int64) int64 {
+ return a + 8207351403619448057 - b - 1779494519303207690 + c*8810076340510052032*d - 4465874067674546219 - e*4361839741470334295 - f + 8688847565426072650*g*8065564729145417479
+}
+
+func testOcom(t *testing.T) {
+ want1, want2 := int32(0x55555555), int32(-0x55555556)
+ if got1, got2 := testOcom_ssa(0x55555555, 0x55555555); want1 != got1 || want2 != got2 {
+ t.Errorf("testOcom failed, wanted %d and %d got %d and %d", want1, want2, got1, got2)
+ }
+}
+
+//go:noinline
+func testOcom_ssa(a, b int32) (int32, int32) {
+ return ^^^^a, ^^^^^b
+}
+
+func lrot1_ssa(w uint8, x uint16, y uint32, z uint64) (a uint8, b uint16, c uint32, d uint64) {
+ a = (w << 5) | (w >> 3)
+ b = (x << 13) | (x >> 3)
+ c = (y << 29) | (y >> 3)
+ d = (z << 61) | (z >> 3)
+ return
+}
+
+//go:noinline
+func lrot2_ssa(w, n uint32) uint32 {
+	// Want to be sure that a "rotate by 32", which
+	// is really (w << 32) | (w >> 0) == 0 | w == w,
+	// is correctly compiled.
+ return (w << n) | (w >> (32 - n))
+}
+
+//go:noinline
+func lrot3_ssa(w uint32) uint32 {
+	// Want to be sure that a "rotate by 32", which
+	// is really (w << 32) | (w >> 0) == 0 | w == w,
+	// is correctly compiled.
+ return (w << 32) | (w >> (32 - 32))
+}
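+
+// A short note on the shift semantics relied on above (assuming the Go spec's
+// behavior, not part of the original change): shift counts are not masked, so
+// for a uint32 value w, w<<32 evaluates to 0. Both lrot2_ssa(w, 32) and
+// lrot3_ssa(w) therefore reduce to 0 | (w >> 0) == w, which testLrot checks.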
+
+func testLrot(t *testing.T) {
+ wantA, wantB, wantC, wantD := uint8(0xe1), uint16(0xe001),
+ uint32(0xe0000001), uint64(0xe000000000000001)
+ a, b, c, d := lrot1_ssa(0xf, 0xf, 0xf, 0xf)
+ if a != wantA || b != wantB || c != wantC || d != wantD {
+ t.Errorf("lrot1_ssa(0xf, 0xf, 0xf, 0xf)=%d %d %d %d, got %d %d %d %d", wantA, wantB, wantC, wantD, a, b, c, d)
+ }
+ x := lrot2_ssa(0xb0000001, 32)
+ wantX := uint32(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot2_ssa(0xb0000001, 32)=%d, got %d", wantX, x)
+ }
+ x = lrot3_ssa(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot3_ssa(0xb0000001)=%d, got %d", wantX, x)
+ }
+
+}
+
+//go:noinline
+func sub1_ssa() uint64 {
+ v1 := uint64(3) // uint64
+ return v1*v1 - (v1&v1)&v1
+}
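+
+// Worked value for the first check in testSubConst (a sketch, not part of the
+// original change): with v1 == 3, sub1_ssa returns 3*3 - ((3&3)&3) = 9 - 3 = 6.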
+
+//go:noinline
+func sub2_ssa() uint8 {
+ v1 := uint8(0)
+ v3 := v1 + v1 + v1 ^ v1 | 3 + v1 ^ v1 | v1 ^ v1
+ v1-- // dev.ssa doesn't see this one
+ return v1 ^ v1*v1 - v3
+}
+
+func testSubConst(t *testing.T) {
+ x1 := sub1_ssa()
+ want1 := uint64(6)
+ if x1 != want1 {
+ t.Errorf("sub1_ssa()=%d, got %d", want1, x1)
+ }
+ x2 := sub2_ssa()
+ want2 := uint8(251)
+ if x2 != want2 {
+ t.Errorf("sub2_ssa()=%d, got %d", want2, x2)
+ }
+}
+
+//go:noinline
+func orPhi_ssa(a bool, x int) int {
+ v := 0
+ if a {
+ v = -1
+ } else {
+ v = -1
+ }
+ return x | v
+}
+
+func testOrPhi(t *testing.T) {
+ if want, got := -1, orPhi_ssa(true, 4); got != want {
+ t.Errorf("orPhi_ssa(true, 4)=%d, want %d", got, want)
+ }
+ if want, got := -1, orPhi_ssa(false, 0); got != want {
+ t.Errorf("orPhi_ssa(false, 0)=%d, want %d", got, want)
+ }
+}
+
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+ return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+ return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+ return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+ return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+ return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+ return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+ return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+ return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+ return ^(a >> s)
+}
+
+// testShiftedOps tests the ARM shifted-operand forms of the functions above.
+func testShiftedOps(t *testing.T) {
+ a, b := uint32(10), uint32(42)
+ if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+ t.Errorf("addshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+ t.Errorf("subshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+ t.Errorf("andshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+ t.Errorf("orshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+ t.Errorf("xorshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+ t.Errorf("bicshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+ t.Errorf("notshiftLL_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+ t.Errorf("addshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+ t.Errorf("subshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+ t.Errorf("andshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+ t.Errorf("orshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+ t.Errorf("xorshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+ t.Errorf("bicshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+ t.Errorf("notshiftRL_ssa(10) = %d want %d", got, want)
+ }
+ c, d := int32(10), int32(-42)
+ if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+ t.Errorf("addshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+ t.Errorf("subshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+ t.Errorf("rsbshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+ t.Errorf("andshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+ t.Errorf("orshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+ t.Errorf("xorshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+ t.Errorf("bicshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+ t.Errorf("notshiftRA_ssa(-42) = %d want %d", got, want)
+ }
+ s := uint8(3)
+ if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+ t.Errorf("notshiftLLreg_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+ t.Errorf("notshiftRLreg_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("addshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("subshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("rsbshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("andshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("orshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("xorshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("bicshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+ t.Errorf("notshiftRAreg_ssa(-42, 3) = %d want %d", got, want)
+ }
+}
+
+// TestArithmetic tests that both backends have the same result for arithmetic expressions.
+func TestArithmetic(t *testing.T) {
+ test64BitConstMult(t)
+ test64BitConstAdd(t)
+ testRegallocCVSpill(t)
+ testSubqToNegq(t)
+ testBitwiseLogic(t)
+ testOcom(t)
+ testLrot(t)
+ testShiftCX(t)
+ testSubConst(t)
+ testOverflowConstShift(t)
+ testArithConstShift(t)
+ testArithRshConst(t)
+ testLargeConst(t)
+ testLoadCombine(t)
+ testLoadSymCombine(t)
+ testShiftRemoval(t)
+ testShiftedOps(t)
+ testDivFixUp(t)
+ testDivisibleSignedPow2(t)
+ testDivisibility(t)
+}
+
+// testDivFixUp ensures that signed division fix-ups are being generated.
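+// The interesting inputs are the most negative values of each signed type:
+// the Go spec requires math.MinInt8 / -1 to wrap back to math.MinInt8 rather
+// than trap, so on architectures whose divide instruction faults on that case
+// the backend must emit extra fix-up code. The deferred recover below turns a
+// missing fix-up into a test failure instead of a crash.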
+func testDivFixUp(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error("testDivFixUp failed")
+ if e, ok := r.(runtime.Error); ok {
+ t.Logf("%v\n", e.Error())
+ }
+ }
+ }()
+ var w int8 = -128
+ var x int16 = -32768
+ var y int32 = -2147483648
+ var z int64 = -9223372036854775808
+
+ for i := -5; i < 0; i++ {
+ g8 = w / int8(i)
+ g16 = x / int16(i)
+ g32 = y / int32(i)
+ g64 = z / int64(i)
+ g8 = w % int8(i)
+ g16 = x % int16(i)
+ g32 = y % int32(i)
+ g64 = z % int64(i)
+ }
+}
+
+//go:noinline
+func divisible_int8_2to1(x int8) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int8_2to2(x int8) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int8_2to3(x int8) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int8_2to4(x int8) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int8_2to5(x int8) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int8_2to6(x int8) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to1(x int16) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int16_2to2(x int16) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int16_2to3(x int16) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int16_2to4(x int16) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int16_2to5(x int16) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int16_2to6(x int16) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to7(x int16) bool {
+ return x%(1<<7) == 0
+}
+
+//go:noinline
+func divisible_int16_2to8(x int16) bool {
+ return x%(1<<8) == 0
+}
+
+//go:noinline
+func divisible_int16_2to9(x int16) bool {
+ return x%(1<<9) == 0
+}
+
+//go:noinline
+func divisible_int16_2to10(x int16) bool {
+ return x%(1<<10) == 0
+}
+
+//go:noinline
+func divisible_int16_2to11(x int16) bool {
+ return x%(1<<11) == 0
+}
+
+//go:noinline
+func divisible_int16_2to12(x int16) bool {
+ return x%(1<<12) == 0
+}
+
+//go:noinline
+func divisible_int16_2to13(x int16) bool {
+ return x%(1<<13) == 0
+}
+
+//go:noinline
+func divisible_int16_2to14(x int16) bool {
+ return x%(1<<14) == 0
+}
+
+//go:noinline
+func divisible_int32_2to4(x int32) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int32_2to15(x int32) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int32_2to26(x int32) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to4(x int64) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int64_2to15(x int64) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int64_2to26(x int64) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to34(x int64) bool {
+ return x%(1<<34) == 0
+}
+
+//go:noinline
+func divisible_int64_2to48(x int64) bool {
+ return x%(1<<48) == 0
+}
+
+//go:noinline
+func divisible_int64_2to57(x int64) bool {
+ return x%(1<<57) == 0
+}
+
+// testDivisibleSignedPow2 confirms that x%(1<<k) == 0 is rewritten correctly.
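+// In two's complement, x is a multiple of 1<<k exactly when its low k bits are
+// zero, so the modulo can be replaced by a mask test such as x&(1<<k-1) == 0,
+// with no division needed for either positive or negative x.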
+func testDivisibleSignedPow2(t *testing.T) {
+ var i int64
+ var pow2 = []int64{
+ 1,
+ 1 << 1,
+ 1 << 2,
+ 1 << 3,
+ 1 << 4,
+ 1 << 5,
+ 1 << 6,
+ 1 << 7,
+ 1 << 8,
+ 1 << 9,
+ 1 << 10,
+ 1 << 11,
+ 1 << 12,
+ 1 << 13,
+ 1 << 14,
+ }
+ // exhaustive test for int8
+ for i = math.MinInt8; i <= math.MaxInt8; i++ {
+ if want, got := int8(i)%int8(pow2[1]) == 0, divisible_int8_2to1(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[2]) == 0, divisible_int8_2to2(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[3]) == 0, divisible_int8_2to3(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[4]) == 0, divisible_int8_2to4(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[5]) == 0, divisible_int8_2to5(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[6]) == 0, divisible_int8_2to6(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to6(%d) = %v want %v", i, got, want)
+ }
+ }
+ // exhaustive test for int16
+ for i = math.MinInt16; i <= math.MaxInt16; i++ {
+ if want, got := int16(i)%int16(pow2[1]) == 0, divisible_int16_2to1(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[2]) == 0, divisible_int16_2to2(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[3]) == 0, divisible_int16_2to3(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[4]) == 0, divisible_int16_2to4(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[5]) == 0, divisible_int16_2to5(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[6]) == 0, divisible_int16_2to6(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to6(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[7]) == 0, divisible_int16_2to7(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to7(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[8]) == 0, divisible_int16_2to8(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[9]) == 0, divisible_int16_2to9(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to9(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[10]) == 0, divisible_int16_2to10(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to10(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[11]) == 0, divisible_int16_2to11(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to11(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[12]) == 0, divisible_int16_2to12(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to12(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[13]) == 0, divisible_int16_2to13(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to13(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[14]) == 0, divisible_int16_2to14(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to14(%d) = %v want %v", i, got, want)
+ }
+ }
+ // spot check for int32 and int64
+ var (
+ two4 int64 = 1 << 4
+ two15 int64 = 1 << 15
+ two26 int64 = 1 << 26
+ two34 int64 = 1 << 34
+ two48 int64 = 1 << 48
+ two57 int64 = 1 << 57
+ )
+ var xs = []int64{two4, two4 + 3, -3 * two4, -3*two4 + 1,
+ two15, two15 + 3, -3 * two15, -3*two15 + 1,
+ two26, two26 + 37, -5 * two26, -5*two26 + 2,
+ two34, two34 + 356, -7 * two34, -7*two34 + 13,
+ two48, two48 + 3000, -12 * two48, -12*two48 + 1111,
+ two57, two57 + 397654, -15 * two57, -15*two57 + 11234,
+ }
+ for _, x := range xs {
+ if int64(int32(x)) == x {
+ if want, got := int32(x)%int32(two4) == 0, divisible_int32_2to4(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two15) == 0, divisible_int32_2to15(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two26) == 0, divisible_int32_2to26(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to26(%d) = %v want %v", x, got, want)
+ }
+ }
+ // spot check for int64
+ if want, got := x%two4 == 0, divisible_int64_2to4(x); got != want {
+ t.Errorf("divisible_int64_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two15 == 0, divisible_int64_2to15(x); got != want {
+ t.Errorf("divisible_int64_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two26 == 0, divisible_int64_2to26(x); got != want {
+ t.Errorf("divisible_int64_2to26(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two34 == 0, divisible_int64_2to34(x); got != want {
+ t.Errorf("divisible_int64_2to34(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two48 == 0, divisible_int64_2to48(x); got != want {
+ t.Errorf("divisible_int64_2to48(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two57 == 0, divisible_int64_2to57(x); got != want {
+ t.Errorf("divisible_int64_2to57(%d) = %v want %v", x, got, want)
+ }
+ }
+}
+
+//go:noinline
+func div6_uint8(n uint8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint16(n uint16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint32(n uint32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint64(n uint64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_uint8(n uint8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint16(n uint16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint32(n uint32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint64(n uint64) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div6_int8(n int8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int16(n int16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int32(n int32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int64(n int64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_int8(n int8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int16(n int16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int32(n int32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int64(n int64) bool {
+ return n%19 == 0
+}
+
+// testDivisibility confirms that the rewrite rules for x%c == 0, with c constant, are correct.
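+// For a general constant c, divisibility can be strength-reduced in the
+// Granlund-Montgomery / Hacker's Delight style: multiply x by the modular
+// inverse of c's odd part (mod 2^w), rotate right by the number of trailing
+// zero bits of c, and compare against floor((2^w-1)/c). For example, for
+// uint32 and c = 6, x%6 == 0 iff bits.RotateLeft32(x*0xAAAAAAAB, -1) <= 0x2AAAAAAA.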
+func testDivisibility(t *testing.T) {
+ // unsigned tests
+ // test an even and an odd divisor
+ var sixU, nineteenU uint64 = 6, 19
+ // test all inputs for uint8, uint16
+ for i := uint64(0); i <= math.MaxUint16; i++ {
+ if i <= math.MaxUint8 {
+ if want, got := uint8(i)%uint8(sixU) == 0, div6_uint8(uint8(i)); got != want {
+ t.Errorf("div6_uint8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint8(i)%uint8(nineteenU) == 0, div19_uint8(uint8(i)); got != want {
+ t.Errorf("div6_uint19(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := uint16(i)%uint16(sixU) == 0, div6_uint16(uint16(i)); got != want {
+ t.Errorf("div6_uint16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint16(i)%uint16(nineteenU) == 0, div19_uint16(uint16(i)); got != want {
+ t.Errorf("div19_uint16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var maxU32, maxU64 uint64 = math.MaxUint32, math.MaxUint64
+ // spot check inputs for uint32 and uint64
+ xu := []uint64{
+ 0, 1, 2, 3, 4, 5,
+ sixU, 2 * sixU, 3 * sixU, 5 * sixU, 12345 * sixU,
+ sixU + 1, 2*sixU - 5, 3*sixU + 3, 5*sixU + 4, 12345*sixU - 2,
+ nineteenU, 2 * nineteenU, 3 * nineteenU, 5 * nineteenU, 12345 * nineteenU,
+ nineteenU + 1, 2*nineteenU - 5, 3*nineteenU + 3, 5*nineteenU + 4, 12345*nineteenU - 2,
+ maxU32, maxU32 - 1, maxU32 - 2, maxU32 - 3, maxU32 - 4,
+ maxU32 - 5, maxU32 - 6, maxU32 - 7, maxU32 - 8,
+ maxU32 - 9, maxU32 - 10, maxU32 - 11, maxU32 - 12,
+ maxU32 - 13, maxU32 - 14, maxU32 - 15, maxU32 - 16,
+ maxU32 - 17, maxU32 - 18, maxU32 - 19, maxU32 - 20,
+ maxU64, maxU64 - 1, maxU64 - 2, maxU64 - 3, maxU64 - 4,
+ maxU64 - 5, maxU64 - 6, maxU64 - 7, maxU64 - 8,
+ maxU64 - 9, maxU64 - 10, maxU64 - 11, maxU64 - 12,
+ maxU64 - 13, maxU64 - 14, maxU64 - 15, maxU64 - 16,
+ maxU64 - 17, maxU64 - 18, maxU64 - 19, maxU64 - 20,
+ }
+ for _, x := range xu {
+ if x <= maxU32 {
+ if want, got := uint32(x)%uint32(sixU) == 0, div6_uint32(uint32(x)); got != want {
+ t.Errorf("div6_uint32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := uint32(x)%uint32(nineteenU) == 0, div19_uint32(uint32(x)); got != want {
+ t.Errorf("div19_uint32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixU == 0, div6_uint64(x); got != want {
+ t.Errorf("div6_uint64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenU == 0, div19_uint64(x); got != want {
+ t.Errorf("div19_uint64(%d) = %v want %v", x, got, want)
+ }
+ }
+
+ // signed tests
+ // test an even and an odd divisor
+ var sixS, nineteenS int64 = 6, 19
+ // test all inputs for int8, int16
+ for i := int64(math.MinInt16); i <= math.MaxInt16; i++ {
+ if math.MinInt8 <= i && i <= math.MaxInt8 {
+ if want, got := int8(i)%int8(sixS) == 0, div6_int8(int8(i)); got != want {
+ t.Errorf("div6_int8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(nineteenS) == 0, div19_int8(int8(i)); got != want {
+ t.Errorf("div6_int19(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := int16(i)%int16(sixS) == 0, div6_int16(int16(i)); got != want {
+ t.Errorf("div6_int16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(nineteenS) == 0, div19_int16(int16(i)); got != want {
+ t.Errorf("div19_int16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var minI32, maxI32, minI64, maxI64 int64 = math.MinInt32, math.MaxInt32, math.MinInt64, math.MaxInt64
+ // spot check inputs for int32 and int64
+ xs := []int64{
+ 0, 1, 2, 3, 4, 5,
+ -1, -2, -3, -4, -5,
+ sixS, 2 * sixS, 3 * sixS, 5 * sixS, 12345 * sixS,
+ sixS + 1, 2*sixS - 5, 3*sixS + 3, 5*sixS + 4, 12345*sixS - 2,
+ -sixS, -2 * sixS, -3 * sixS, -5 * sixS, -12345 * sixS,
+ -sixS + 1, -2*sixS - 5, -3*sixS + 3, -5*sixS + 4, -12345*sixS - 2,
+ nineteenS, 2 * nineteenS, 3 * nineteenS, 5 * nineteenS, 12345 * nineteenS,
+ nineteenS + 1, 2*nineteenS - 5, 3*nineteenS + 3, 5*nineteenS + 4, 12345*nineteenS - 2,
+ -nineteenS, -2 * nineteenS, -3 * nineteenS, -5 * nineteenS, -12345 * nineteenS,
+ -nineteenS + 1, -2*nineteenS - 5, -3*nineteenS + 3, -5*nineteenS + 4, -12345*nineteenS - 2,
+ minI32, minI32 + 1, minI32 + 2, minI32 + 3, minI32 + 4,
+ minI32 + 5, minI32 + 6, minI32 + 7, minI32 + 8,
+ minI32 + 9, minI32 + 10, minI32 + 11, minI32 + 12,
+ minI32 + 13, minI32 + 14, minI32 + 15, minI32 + 16,
+ minI32 + 17, minI32 + 18, minI32 + 19, minI32 + 20,
+ maxI32, maxI32 - 1, maxI32 - 2, maxI32 - 3, maxI32 - 4,
+ maxI32 - 5, maxI32 - 6, maxI32 - 7, maxI32 - 8,
+ maxI32 - 9, maxI32 - 10, maxI32 - 11, maxI32 - 12,
+ maxI32 - 13, maxI32 - 14, maxI32 - 15, maxI32 - 16,
+ maxI32 - 17, maxI32 - 18, maxI32 - 19, maxI32 - 20,
+ minI64, minI64 + 1, minI64 + 2, minI64 + 3, minI64 + 4,
+ minI64 + 5, minI64 + 6, minI64 + 7, minI64 + 8,
+ minI64 + 9, minI64 + 10, minI64 + 11, minI64 + 12,
+ minI64 + 13, minI64 + 14, minI64 + 15, minI64 + 16,
+ minI64 + 17, minI64 + 18, minI64 + 19, minI64 + 20,
+ maxI64, maxI64 - 1, maxI64 - 2, maxI64 - 3, maxI64 - 4,
+ maxI64 - 5, maxI64 - 6, maxI64 - 7, maxI64 - 8,
+ maxI64 - 9, maxI64 - 10, maxI64 - 11, maxI64 - 12,
+ maxI64 - 13, maxI64 - 14, maxI64 - 15, maxI64 - 16,
+ maxI64 - 17, maxI64 - 18, maxI64 - 19, maxI64 - 20,
+ }
+ for _, x := range xs {
+ if minI32 <= x && x <= maxI32 {
+ if want, got := int32(x)%int32(sixS) == 0, div6_int32(int32(x)); got != want {
+ t.Errorf("div6_int32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := int32(x)%int32(nineteenS) == 0, div19_int32(int32(x)); got != want {
+ t.Errorf("div19_int32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixS == 0, div6_int64(x); got != want {
+ t.Errorf("div6_int64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenS == 0, div19_int64(x); got != want {
+ t.Errorf("div19_int64(%d) = %v want %v", x, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/array_test.go b/src/cmd/compile/internal/gc/testdata/array_test.go
new file mode 100644
index 0000000..efa00d0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/array_test.go
@@ -0,0 +1,132 @@
+package main
+
+import "testing"
+
+//go:noinline
+func testSliceLenCap12_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:j]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap1_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap2_ssa(a [10]int, i, j int) (int, int) {
+ b := a[:j]
+ return len(b), cap(b)
+}
+
+func testSliceLenCap(t *testing.T) {
+ a := [10]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ tests := [...]struct {
+ fn func(a [10]int, i, j int) (int, int)
+ i, j int // slice range
+ l, c int // len, cap
+ }{
+ // -1 means the value is not used.
+ {testSliceLenCap12_ssa, 0, 0, 0, 10},
+ {testSliceLenCap12_ssa, 0, 1, 1, 10},
+ {testSliceLenCap12_ssa, 0, 10, 10, 10},
+ {testSliceLenCap12_ssa, 10, 10, 0, 0},
+ {testSliceLenCap12_ssa, 0, 5, 5, 10},
+ {testSliceLenCap12_ssa, 5, 5, 0, 5},
+ {testSliceLenCap12_ssa, 5, 10, 5, 5},
+ {testSliceLenCap1_ssa, 0, -1, 0, 10},
+ {testSliceLenCap1_ssa, 5, -1, 5, 5},
+ {testSliceLenCap1_ssa, 10, -1, 0, 0},
+ {testSliceLenCap2_ssa, -1, 0, 0, 10},
+ {testSliceLenCap2_ssa, -1, 5, 5, 10},
+ {testSliceLenCap2_ssa, -1, 10, 10, 10},
+ }
+
+ for i, test := range tests {
+ if l, c := test.fn(a, test.i, test.j); l != test.l || c != test.c {
+ t.Errorf("#%d len(a[%d:%d]), cap(a[%d:%d]) = %d %d, want %d %d", i, test.i, test.j, test.i, test.j, l, c, test.l, test.c)
+ }
+ }
+}
+
+//go:noinline
+func testSliceGetElement_ssa(a [10]int, i, j, p int) int {
+ return a[i:j][p]
+}
+
+func testSliceGetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 20},
+ {0, 5, 4, 40},
+ {5, 10, 3, 80},
+ {1, 9, 7, 80},
+ }
+
+ for i, test := range tests {
+ if got := testSliceGetElement_ssa(a, test.i, test.j, test.p); got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testSliceSetElement_ssa(a *[10]int, i, j, p, x int) {
+ (*a)[i:j][p] = x
+}
+
+func testSliceSetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 17},
+ {0, 5, 4, 11},
+ {5, 10, 3, 28},
+ {1, 9, 7, 99},
+ }
+
+ for i, test := range tests {
+ testSliceSetElement_ssa(&a, test.i, test.j, test.p, test.want)
+ if got := a[test.i+test.p]; got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+func testSlicePanic1(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceLenCap12_ssa(a, 3, 12)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func testSlicePanic2(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceGetElement_ssa(a, 3, 7, 4)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func TestArray(t *testing.T) {
+ testSliceLenCap(t)
+ testSliceGetElement(t)
+ testSliceSetElement(t)
+ testSlicePanic1(t)
+ testSlicePanic2(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/assert_test.go b/src/cmd/compile/internal/gc/testdata/assert_test.go
new file mode 100644
index 0000000..4326be8
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/assert_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests type assertion expressions and statements.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+type (
+ S struct{}
+ U struct{}
+
+ I interface {
+ F()
+ }
+)
+
+var (
+ s *S
+ u *U
+)
+
+func (s *S) F() {}
+func (u *U) F() {}
+
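+// e2t_ssa and i2t_ssa assert an interface value down to the concrete type *U.
+// The single-result forms must panic with a *runtime.TypeAssertionError when
+// the dynamic type is not *U; the comma-ok variants further below must not.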
+func e2t_ssa(e interface{}) *U {
+ return e.(*U)
+}
+
+func i2t_ssa(i I) *U {
+ return i.(*U)
+}
+
+func testAssertE2TOk(t *testing.T) {
+ if got := e2t_ssa(u); got != u {
+ t.Errorf("e2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertE2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("e2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("e2t_ssa(s) panic type %T", e)
+ return
+ }
+ want := "interface conversion: interface {} is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("e2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = e2t_ssa(s)
+ t.Errorf("e2t_ssa(s) should panic")
+
+}
+
+func testAssertI2TOk(t *testing.T) {
+ if got := i2t_ssa(u); got != u {
+ t.Errorf("i2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertI2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("i2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("i2t_ssa(s) panic type %T", e)
+ return
+ }
+ want := "interface conversion: main.I is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("i2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = i2t_ssa(s)
+ t.Errorf("i2t_ssa(s) should panic")
+}
+
+func e2t2_ssa(e interface{}) (*U, bool) {
+ u, ok := e.(*U)
+ return u, ok
+}
+
+func i2t2_ssa(i I) (*U, bool) {
+ u, ok := i.(*U)
+ return u, ok
+}
+
+func testAssertE2T2(t *testing.T) {
+ if got, ok := e2t2_ssa(u); !ok || got != u {
+ t.Errorf("e2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := e2t2_ssa(s); ok || got != nil {
+ t.Errorf("e2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+func testAssertI2T2(t *testing.T) {
+ if got, ok := i2t2_ssa(u); !ok || got != u {
+ t.Errorf("i2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := i2t2_ssa(s); ok || got != nil {
+ t.Errorf("i2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+// TestTypeAssertion tests type assertions.
+func TestTypeAssertion(t *testing.T) {
+ testAssertE2TOk(t)
+ testAssertE2TPanic(t)
+ testAssertI2TOk(t)
+ testAssertI2TPanic(t)
+ testAssertE2T2(t)
+ testAssertI2T2(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/break_test.go b/src/cmd/compile/internal/gc/testdata/break_test.go
new file mode 100644
index 0000000..50245df
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/break_test.go
@@ -0,0 +1,250 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests continue and break.
+
+package main
+
+import "testing"
+
+func continuePlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ return n
+}
+
+func continueLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ return n
+}
+
+func continuePlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakPlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func breakLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ return n
+}
+
+func breakPlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+var g, h int // globals to ensure optimizations don't collapse our switch statements
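+// Because g and h are only known at run time, the compiler cannot fold the
+// switches below to a single case, so each break statement stays reachable.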
+
+func switchPlain_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ break
+ n = 2
+ }
+ return n
+}
+
+func switchLabeled_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ break Done
+ n = 2
+ }
+ return n
+}
+
+func switchPlainInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ Done:
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledOuter_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+// TestBreakContinue tests that continue and break statements do what they say.
+func TestBreakContinue(t *testing.T) {
+ tests := [...]struct {
+ name string
+ fn func() int
+ want int
+ }{
+ {"continuePlain_ssa", continuePlain_ssa, 9},
+ {"continueLabeled_ssa", continueLabeled_ssa, 9},
+ {"continuePlainInner_ssa", continuePlainInner_ssa, 29},
+ {"continueLabeledInner_ssa", continueLabeledInner_ssa, 29},
+ {"continueLabeledOuter_ssa", continueLabeledOuter_ssa, 5},
+
+ {"breakPlain_ssa", breakPlain_ssa, 5},
+ {"breakLabeled_ssa", breakLabeled_ssa, 5},
+ {"breakPlainInner_ssa", breakPlainInner_ssa, 25},
+ {"breakLabeledInner_ssa", breakLabeledInner_ssa, 25},
+ {"breakLabeledOuter_ssa", breakLabeledOuter_ssa, 5},
+
+ {"switchPlain_ssa", switchPlain_ssa, 1},
+ {"switchLabeled_ssa", switchLabeled_ssa, 1},
+ {"switchPlainInner_ssa", switchPlainInner_ssa, 2},
+ {"switchLabeledInner_ssa", switchLabeledInner_ssa, 2},
+ {"switchLabeledOuter_ssa", switchLabeledOuter_ssa, 11},
+
+ // no select tests; they're identical to switch
+ }
+
+ for _, test := range tests {
+ if got := test.fn(); got != test.want {
+ t.Errorf("%s()=%d, want %d", test.name, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/chan_test.go b/src/cmd/compile/internal/gc/testdata/chan_test.go
new file mode 100644
index 0000000..628bd8f
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/chan_test.go
@@ -0,0 +1,63 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// chan_test.go tests chan operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenChan_ssa(v chan int) int {
+ return len(v)
+}
+
+//go:noinline
+func capChan_ssa(v chan int) int {
+ return cap(v)
+}
+
+func testLenChan(t *testing.T) {
+
+ v := make(chan int, 10)
+ v <- 1
+ v <- 1
+ v <- 1
+
+ if want, got := 3, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(chan) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+
+func testCapChan(t *testing.T) {
+
+ v := make(chan int, 25)
+
+ if want, got := 25, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(chan) = %d, got %d", want, got)
+ }
+}
+
+func testCapNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(nil) = %d, got %d", want, got)
+ }
+}
+
+func TestChan(t *testing.T) {
+ testLenChan(t)
+ testLenNilChan(t)
+
+ testCapChan(t)
+ testCapNilChan(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/closure_test.go b/src/cmd/compile/internal/gc/testdata/closure_test.go
new file mode 100644
index 0000000..6cddc2d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/closure_test.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// closure_test.go tests closure operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testCFunc_ssa() int {
+ a := 0
+ b := func() {
+ switch {
+ }
+ a++
+ }
+ b()
+ b()
+ return a
+}
+
+func testCFunc(t *testing.T) {
+ if want, got := 2, testCFunc_ssa(); got != want {
+ t.Errorf("expected %d, got %d", want, got)
+ }
+}
+
+// TestClosure tests closure related behavior.
+func TestClosure(t *testing.T) {
+ testCFunc(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/cmpConst_test.go b/src/cmd/compile/internal/gc/testdata/cmpConst_test.go
new file mode 100644
index 0000000..9400ef4
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/cmpConst_test.go
@@ -0,0 +1,2209 @@
+// Code generated by gen/cmpConstGen.go. DO NOT EDIT.
+
+package main
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+// results show the expected result for the elements left of, equal to and right of the index.
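+// For example, lt (x < C) is expected to be true for every value that appears
+// before C in the sorted value slice, and false for C itself and for
+// everything after it.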
+type result struct{ l, e, r bool }
+
+var (
+ eq = result{l: false, e: true, r: false}
+ ne = result{l: true, e: false, r: true}
+ lt = result{l: true, e: false, r: false}
+ le = result{l: true, e: true, r: false}
+ gt = result{l: false, e: false, r: true}
+ ge = result{l: false, e: true, r: true}
+)
+
+// uint64 tests
+var uint64_vals = []uint64{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+ 9223372036854775808,
+ 18374686479671623680,
+ 18446744073709551614,
+ 18446744073709551615,
+}
+
+func lt_0_uint64(x uint64) bool { return x < 0 }
+func le_0_uint64(x uint64) bool { return x <= 0 }
+func gt_0_uint64(x uint64) bool { return x > 0 }
+func ge_0_uint64(x uint64) bool { return x >= 0 }
+func eq_0_uint64(x uint64) bool { return x == 0 }
+func ne_0_uint64(x uint64) bool { return x != 0 }
+func lt_1_uint64(x uint64) bool { return x < 1 }
+func le_1_uint64(x uint64) bool { return x <= 1 }
+func gt_1_uint64(x uint64) bool { return x > 1 }
+func ge_1_uint64(x uint64) bool { return x >= 1 }
+func eq_1_uint64(x uint64) bool { return x == 1 }
+func ne_1_uint64(x uint64) bool { return x != 1 }
+func lt_126_uint64(x uint64) bool { return x < 126 }
+func le_126_uint64(x uint64) bool { return x <= 126 }
+func gt_126_uint64(x uint64) bool { return x > 126 }
+func ge_126_uint64(x uint64) bool { return x >= 126 }
+func eq_126_uint64(x uint64) bool { return x == 126 }
+func ne_126_uint64(x uint64) bool { return x != 126 }
+func lt_127_uint64(x uint64) bool { return x < 127 }
+func le_127_uint64(x uint64) bool { return x <= 127 }
+func gt_127_uint64(x uint64) bool { return x > 127 }
+func ge_127_uint64(x uint64) bool { return x >= 127 }
+func eq_127_uint64(x uint64) bool { return x == 127 }
+func ne_127_uint64(x uint64) bool { return x != 127 }
+func lt_128_uint64(x uint64) bool { return x < 128 }
+func le_128_uint64(x uint64) bool { return x <= 128 }
+func gt_128_uint64(x uint64) bool { return x > 128 }
+func ge_128_uint64(x uint64) bool { return x >= 128 }
+func eq_128_uint64(x uint64) bool { return x == 128 }
+func ne_128_uint64(x uint64) bool { return x != 128 }
+func lt_254_uint64(x uint64) bool { return x < 254 }
+func le_254_uint64(x uint64) bool { return x <= 254 }
+func gt_254_uint64(x uint64) bool { return x > 254 }
+func ge_254_uint64(x uint64) bool { return x >= 254 }
+func eq_254_uint64(x uint64) bool { return x == 254 }
+func ne_254_uint64(x uint64) bool { return x != 254 }
+func lt_255_uint64(x uint64) bool { return x < 255 }
+func le_255_uint64(x uint64) bool { return x <= 255 }
+func gt_255_uint64(x uint64) bool { return x > 255 }
+func ge_255_uint64(x uint64) bool { return x >= 255 }
+func eq_255_uint64(x uint64) bool { return x == 255 }
+func ne_255_uint64(x uint64) bool { return x != 255 }
+func lt_256_uint64(x uint64) bool { return x < 256 }
+func le_256_uint64(x uint64) bool { return x <= 256 }
+func gt_256_uint64(x uint64) bool { return x > 256 }
+func ge_256_uint64(x uint64) bool { return x >= 256 }
+func eq_256_uint64(x uint64) bool { return x == 256 }
+func ne_256_uint64(x uint64) bool { return x != 256 }
+func lt_32766_uint64(x uint64) bool { return x < 32766 }
+func le_32766_uint64(x uint64) bool { return x <= 32766 }
+func gt_32766_uint64(x uint64) bool { return x > 32766 }
+func ge_32766_uint64(x uint64) bool { return x >= 32766 }
+func eq_32766_uint64(x uint64) bool { return x == 32766 }
+func ne_32766_uint64(x uint64) bool { return x != 32766 }
+func lt_32767_uint64(x uint64) bool { return x < 32767 }
+func le_32767_uint64(x uint64) bool { return x <= 32767 }
+func gt_32767_uint64(x uint64) bool { return x > 32767 }
+func ge_32767_uint64(x uint64) bool { return x >= 32767 }
+func eq_32767_uint64(x uint64) bool { return x == 32767 }
+func ne_32767_uint64(x uint64) bool { return x != 32767 }
+func lt_32768_uint64(x uint64) bool { return x < 32768 }
+func le_32768_uint64(x uint64) bool { return x <= 32768 }
+func gt_32768_uint64(x uint64) bool { return x > 32768 }
+func ge_32768_uint64(x uint64) bool { return x >= 32768 }
+func eq_32768_uint64(x uint64) bool { return x == 32768 }
+func ne_32768_uint64(x uint64) bool { return x != 32768 }
+func lt_65534_uint64(x uint64) bool { return x < 65534 }
+func le_65534_uint64(x uint64) bool { return x <= 65534 }
+func gt_65534_uint64(x uint64) bool { return x > 65534 }
+func ge_65534_uint64(x uint64) bool { return x >= 65534 }
+func eq_65534_uint64(x uint64) bool { return x == 65534 }
+func ne_65534_uint64(x uint64) bool { return x != 65534 }
+func lt_65535_uint64(x uint64) bool { return x < 65535 }
+func le_65535_uint64(x uint64) bool { return x <= 65535 }
+func gt_65535_uint64(x uint64) bool { return x > 65535 }
+func ge_65535_uint64(x uint64) bool { return x >= 65535 }
+func eq_65535_uint64(x uint64) bool { return x == 65535 }
+func ne_65535_uint64(x uint64) bool { return x != 65535 }
+func lt_65536_uint64(x uint64) bool { return x < 65536 }
+func le_65536_uint64(x uint64) bool { return x <= 65536 }
+func gt_65536_uint64(x uint64) bool { return x > 65536 }
+func ge_65536_uint64(x uint64) bool { return x >= 65536 }
+func eq_65536_uint64(x uint64) bool { return x == 65536 }
+func ne_65536_uint64(x uint64) bool { return x != 65536 }
+func lt_2147483646_uint64(x uint64) bool { return x < 2147483646 }
+func le_2147483646_uint64(x uint64) bool { return x <= 2147483646 }
+func gt_2147483646_uint64(x uint64) bool { return x > 2147483646 }
+func ge_2147483646_uint64(x uint64) bool { return x >= 2147483646 }
+func eq_2147483646_uint64(x uint64) bool { return x == 2147483646 }
+func ne_2147483646_uint64(x uint64) bool { return x != 2147483646 }
+func lt_2147483647_uint64(x uint64) bool { return x < 2147483647 }
+func le_2147483647_uint64(x uint64) bool { return x <= 2147483647 }
+func gt_2147483647_uint64(x uint64) bool { return x > 2147483647 }
+func ge_2147483647_uint64(x uint64) bool { return x >= 2147483647 }
+func eq_2147483647_uint64(x uint64) bool { return x == 2147483647 }
+func ne_2147483647_uint64(x uint64) bool { return x != 2147483647 }
+func lt_2147483648_uint64(x uint64) bool { return x < 2147483648 }
+func le_2147483648_uint64(x uint64) bool { return x <= 2147483648 }
+func gt_2147483648_uint64(x uint64) bool { return x > 2147483648 }
+func ge_2147483648_uint64(x uint64) bool { return x >= 2147483648 }
+func eq_2147483648_uint64(x uint64) bool { return x == 2147483648 }
+func ne_2147483648_uint64(x uint64) bool { return x != 2147483648 }
+func lt_4278190080_uint64(x uint64) bool { return x < 4278190080 }
+func le_4278190080_uint64(x uint64) bool { return x <= 4278190080 }
+func gt_4278190080_uint64(x uint64) bool { return x > 4278190080 }
+func ge_4278190080_uint64(x uint64) bool { return x >= 4278190080 }
+func eq_4278190080_uint64(x uint64) bool { return x == 4278190080 }
+func ne_4278190080_uint64(x uint64) bool { return x != 4278190080 }
+func lt_4294967294_uint64(x uint64) bool { return x < 4294967294 }
+func le_4294967294_uint64(x uint64) bool { return x <= 4294967294 }
+func gt_4294967294_uint64(x uint64) bool { return x > 4294967294 }
+func ge_4294967294_uint64(x uint64) bool { return x >= 4294967294 }
+func eq_4294967294_uint64(x uint64) bool { return x == 4294967294 }
+func ne_4294967294_uint64(x uint64) bool { return x != 4294967294 }
+func lt_4294967295_uint64(x uint64) bool { return x < 4294967295 }
+func le_4294967295_uint64(x uint64) bool { return x <= 4294967295 }
+func gt_4294967295_uint64(x uint64) bool { return x > 4294967295 }
+func ge_4294967295_uint64(x uint64) bool { return x >= 4294967295 }
+func eq_4294967295_uint64(x uint64) bool { return x == 4294967295 }
+func ne_4294967295_uint64(x uint64) bool { return x != 4294967295 }
+func lt_4294967296_uint64(x uint64) bool { return x < 4294967296 }
+func le_4294967296_uint64(x uint64) bool { return x <= 4294967296 }
+func gt_4294967296_uint64(x uint64) bool { return x > 4294967296 }
+func ge_4294967296_uint64(x uint64) bool { return x >= 4294967296 }
+func eq_4294967296_uint64(x uint64) bool { return x == 4294967296 }
+func ne_4294967296_uint64(x uint64) bool { return x != 4294967296 }
+func lt_1095216660480_uint64(x uint64) bool { return x < 1095216660480 }
+func le_1095216660480_uint64(x uint64) bool { return x <= 1095216660480 }
+func gt_1095216660480_uint64(x uint64) bool { return x > 1095216660480 }
+func ge_1095216660480_uint64(x uint64) bool { return x >= 1095216660480 }
+func eq_1095216660480_uint64(x uint64) bool { return x == 1095216660480 }
+func ne_1095216660480_uint64(x uint64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_uint64(x uint64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_uint64(x uint64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_uint64(x uint64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_uint64(x uint64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_uint64(x uint64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_uint64(x uint64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_uint64(x uint64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_uint64(x uint64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_uint64(x uint64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_uint64(x uint64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_uint64(x uint64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_uint64(x uint64) bool { return x != 9223372036854775807 }
+func lt_9223372036854775808_uint64(x uint64) bool { return x < 9223372036854775808 }
+func le_9223372036854775808_uint64(x uint64) bool { return x <= 9223372036854775808 }
+func gt_9223372036854775808_uint64(x uint64) bool { return x > 9223372036854775808 }
+func ge_9223372036854775808_uint64(x uint64) bool { return x >= 9223372036854775808 }
+func eq_9223372036854775808_uint64(x uint64) bool { return x == 9223372036854775808 }
+func ne_9223372036854775808_uint64(x uint64) bool { return x != 9223372036854775808 }
+func lt_18374686479671623680_uint64(x uint64) bool { return x < 18374686479671623680 }
+func le_18374686479671623680_uint64(x uint64) bool { return x <= 18374686479671623680 }
+func gt_18374686479671623680_uint64(x uint64) bool { return x > 18374686479671623680 }
+func ge_18374686479671623680_uint64(x uint64) bool { return x >= 18374686479671623680 }
+func eq_18374686479671623680_uint64(x uint64) bool { return x == 18374686479671623680 }
+func ne_18374686479671623680_uint64(x uint64) bool { return x != 18374686479671623680 }
+func lt_18446744073709551614_uint64(x uint64) bool { return x < 18446744073709551614 }
+func le_18446744073709551614_uint64(x uint64) bool { return x <= 18446744073709551614 }
+func gt_18446744073709551614_uint64(x uint64) bool { return x > 18446744073709551614 }
+func ge_18446744073709551614_uint64(x uint64) bool { return x >= 18446744073709551614 }
+func eq_18446744073709551614_uint64(x uint64) bool { return x == 18446744073709551614 }
+func ne_18446744073709551614_uint64(x uint64) bool { return x != 18446744073709551614 }
+func lt_18446744073709551615_uint64(x uint64) bool { return x < 18446744073709551615 }
+func le_18446744073709551615_uint64(x uint64) bool { return x <= 18446744073709551615 }
+func gt_18446744073709551615_uint64(x uint64) bool { return x > 18446744073709551615 }
+func ge_18446744073709551615_uint64(x uint64) bool { return x >= 18446744073709551615 }
+func eq_18446744073709551615_uint64(x uint64) bool { return x == 18446744073709551615 }
+func ne_18446744073709551615_uint64(x uint64) bool { return x != 18446744073709551615 }
+
+var uint64_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint64},
+ {idx: 0, exp: le, fn: le_0_uint64},
+ {idx: 0, exp: gt, fn: gt_0_uint64},
+ {idx: 0, exp: ge, fn: ge_0_uint64},
+ {idx: 0, exp: eq, fn: eq_0_uint64},
+ {idx: 0, exp: ne, fn: ne_0_uint64},
+ {idx: 1, exp: lt, fn: lt_1_uint64},
+ {idx: 1, exp: le, fn: le_1_uint64},
+ {idx: 1, exp: gt, fn: gt_1_uint64},
+ {idx: 1, exp: ge, fn: ge_1_uint64},
+ {idx: 1, exp: eq, fn: eq_1_uint64},
+ {idx: 1, exp: ne, fn: ne_1_uint64},
+ {idx: 2, exp: lt, fn: lt_126_uint64},
+ {idx: 2, exp: le, fn: le_126_uint64},
+ {idx: 2, exp: gt, fn: gt_126_uint64},
+ {idx: 2, exp: ge, fn: ge_126_uint64},
+ {idx: 2, exp: eq, fn: eq_126_uint64},
+ {idx: 2, exp: ne, fn: ne_126_uint64},
+ {idx: 3, exp: lt, fn: lt_127_uint64},
+ {idx: 3, exp: le, fn: le_127_uint64},
+ {idx: 3, exp: gt, fn: gt_127_uint64},
+ {idx: 3, exp: ge, fn: ge_127_uint64},
+ {idx: 3, exp: eq, fn: eq_127_uint64},
+ {idx: 3, exp: ne, fn: ne_127_uint64},
+ {idx: 4, exp: lt, fn: lt_128_uint64},
+ {idx: 4, exp: le, fn: le_128_uint64},
+ {idx: 4, exp: gt, fn: gt_128_uint64},
+ {idx: 4, exp: ge, fn: ge_128_uint64},
+ {idx: 4, exp: eq, fn: eq_128_uint64},
+ {idx: 4, exp: ne, fn: ne_128_uint64},
+ {idx: 5, exp: lt, fn: lt_254_uint64},
+ {idx: 5, exp: le, fn: le_254_uint64},
+ {idx: 5, exp: gt, fn: gt_254_uint64},
+ {idx: 5, exp: ge, fn: ge_254_uint64},
+ {idx: 5, exp: eq, fn: eq_254_uint64},
+ {idx: 5, exp: ne, fn: ne_254_uint64},
+ {idx: 6, exp: lt, fn: lt_255_uint64},
+ {idx: 6, exp: le, fn: le_255_uint64},
+ {idx: 6, exp: gt, fn: gt_255_uint64},
+ {idx: 6, exp: ge, fn: ge_255_uint64},
+ {idx: 6, exp: eq, fn: eq_255_uint64},
+ {idx: 6, exp: ne, fn: ne_255_uint64},
+ {idx: 7, exp: lt, fn: lt_256_uint64},
+ {idx: 7, exp: le, fn: le_256_uint64},
+ {idx: 7, exp: gt, fn: gt_256_uint64},
+ {idx: 7, exp: ge, fn: ge_256_uint64},
+ {idx: 7, exp: eq, fn: eq_256_uint64},
+ {idx: 7, exp: ne, fn: ne_256_uint64},
+ {idx: 8, exp: lt, fn: lt_32766_uint64},
+ {idx: 8, exp: le, fn: le_32766_uint64},
+ {idx: 8, exp: gt, fn: gt_32766_uint64},
+ {idx: 8, exp: ge, fn: ge_32766_uint64},
+ {idx: 8, exp: eq, fn: eq_32766_uint64},
+ {idx: 8, exp: ne, fn: ne_32766_uint64},
+ {idx: 9, exp: lt, fn: lt_32767_uint64},
+ {idx: 9, exp: le, fn: le_32767_uint64},
+ {idx: 9, exp: gt, fn: gt_32767_uint64},
+ {idx: 9, exp: ge, fn: ge_32767_uint64},
+ {idx: 9, exp: eq, fn: eq_32767_uint64},
+ {idx: 9, exp: ne, fn: ne_32767_uint64},
+ {idx: 10, exp: lt, fn: lt_32768_uint64},
+ {idx: 10, exp: le, fn: le_32768_uint64},
+ {idx: 10, exp: gt, fn: gt_32768_uint64},
+ {idx: 10, exp: ge, fn: ge_32768_uint64},
+ {idx: 10, exp: eq, fn: eq_32768_uint64},
+ {idx: 10, exp: ne, fn: ne_32768_uint64},
+ {idx: 11, exp: lt, fn: lt_65534_uint64},
+ {idx: 11, exp: le, fn: le_65534_uint64},
+ {idx: 11, exp: gt, fn: gt_65534_uint64},
+ {idx: 11, exp: ge, fn: ge_65534_uint64},
+ {idx: 11, exp: eq, fn: eq_65534_uint64},
+ {idx: 11, exp: ne, fn: ne_65534_uint64},
+ {idx: 12, exp: lt, fn: lt_65535_uint64},
+ {idx: 12, exp: le, fn: le_65535_uint64},
+ {idx: 12, exp: gt, fn: gt_65535_uint64},
+ {idx: 12, exp: ge, fn: ge_65535_uint64},
+ {idx: 12, exp: eq, fn: eq_65535_uint64},
+ {idx: 12, exp: ne, fn: ne_65535_uint64},
+ {idx: 13, exp: lt, fn: lt_65536_uint64},
+ {idx: 13, exp: le, fn: le_65536_uint64},
+ {idx: 13, exp: gt, fn: gt_65536_uint64},
+ {idx: 13, exp: ge, fn: ge_65536_uint64},
+ {idx: 13, exp: eq, fn: eq_65536_uint64},
+ {idx: 13, exp: ne, fn: ne_65536_uint64},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint64},
+ {idx: 14, exp: le, fn: le_2147483646_uint64},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint64},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint64},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint64},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint64},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint64},
+ {idx: 15, exp: le, fn: le_2147483647_uint64},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint64},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint64},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint64},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint64},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint64},
+ {idx: 16, exp: le, fn: le_2147483648_uint64},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint64},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint64},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint64},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint64},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint64},
+ {idx: 17, exp: le, fn: le_4278190080_uint64},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint64},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint64},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint64},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint64},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint64},
+ {idx: 18, exp: le, fn: le_4294967294_uint64},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint64},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint64},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint64},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint64},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint64},
+ {idx: 19, exp: le, fn: le_4294967295_uint64},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint64},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint64},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint64},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint64},
+ {idx: 20, exp: lt, fn: lt_4294967296_uint64},
+ {idx: 20, exp: le, fn: le_4294967296_uint64},
+ {idx: 20, exp: gt, fn: gt_4294967296_uint64},
+ {idx: 20, exp: ge, fn: ge_4294967296_uint64},
+ {idx: 20, exp: eq, fn: eq_4294967296_uint64},
+ {idx: 20, exp: ne, fn: ne_4294967296_uint64},
+ {idx: 21, exp: lt, fn: lt_1095216660480_uint64},
+ {idx: 21, exp: le, fn: le_1095216660480_uint64},
+ {idx: 21, exp: gt, fn: gt_1095216660480_uint64},
+ {idx: 21, exp: ge, fn: ge_1095216660480_uint64},
+ {idx: 21, exp: eq, fn: eq_1095216660480_uint64},
+ {idx: 21, exp: ne, fn: ne_1095216660480_uint64},
+ {idx: 22, exp: lt, fn: lt_9223372036854775806_uint64},
+ {idx: 22, exp: le, fn: le_9223372036854775806_uint64},
+ {idx: 22, exp: gt, fn: gt_9223372036854775806_uint64},
+ {idx: 22, exp: ge, fn: ge_9223372036854775806_uint64},
+ {idx: 22, exp: eq, fn: eq_9223372036854775806_uint64},
+ {idx: 22, exp: ne, fn: ne_9223372036854775806_uint64},
+ {idx: 23, exp: lt, fn: lt_9223372036854775807_uint64},
+ {idx: 23, exp: le, fn: le_9223372036854775807_uint64},
+ {idx: 23, exp: gt, fn: gt_9223372036854775807_uint64},
+ {idx: 23, exp: ge, fn: ge_9223372036854775807_uint64},
+ {idx: 23, exp: eq, fn: eq_9223372036854775807_uint64},
+ {idx: 23, exp: ne, fn: ne_9223372036854775807_uint64},
+ {idx: 24, exp: lt, fn: lt_9223372036854775808_uint64},
+ {idx: 24, exp: le, fn: le_9223372036854775808_uint64},
+ {idx: 24, exp: gt, fn: gt_9223372036854775808_uint64},
+ {idx: 24, exp: ge, fn: ge_9223372036854775808_uint64},
+ {idx: 24, exp: eq, fn: eq_9223372036854775808_uint64},
+ {idx: 24, exp: ne, fn: ne_9223372036854775808_uint64},
+ {idx: 25, exp: lt, fn: lt_18374686479671623680_uint64},
+ {idx: 25, exp: le, fn: le_18374686479671623680_uint64},
+ {idx: 25, exp: gt, fn: gt_18374686479671623680_uint64},
+ {idx: 25, exp: ge, fn: ge_18374686479671623680_uint64},
+ {idx: 25, exp: eq, fn: eq_18374686479671623680_uint64},
+ {idx: 25, exp: ne, fn: ne_18374686479671623680_uint64},
+ {idx: 26, exp: lt, fn: lt_18446744073709551614_uint64},
+ {idx: 26, exp: le, fn: le_18446744073709551614_uint64},
+ {idx: 26, exp: gt, fn: gt_18446744073709551614_uint64},
+ {idx: 26, exp: ge, fn: ge_18446744073709551614_uint64},
+ {idx: 26, exp: eq, fn: eq_18446744073709551614_uint64},
+ {idx: 26, exp: ne, fn: ne_18446744073709551614_uint64},
+ {idx: 27, exp: lt, fn: lt_18446744073709551615_uint64},
+ {idx: 27, exp: le, fn: le_18446744073709551615_uint64},
+ {idx: 27, exp: gt, fn: gt_18446744073709551615_uint64},
+ {idx: 27, exp: ge, fn: ge_18446744073709551615_uint64},
+ {idx: 27, exp: eq, fn: eq_18446744073709551615_uint64},
+ {idx: 27, exp: ne, fn: ne_18446744073709551615_uint64},
+}
+
+// uint32 tests
+var uint32_vals = []uint32{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+}
+
+func lt_0_uint32(x uint32) bool { return x < 0 }
+func le_0_uint32(x uint32) bool { return x <= 0 }
+func gt_0_uint32(x uint32) bool { return x > 0 }
+func ge_0_uint32(x uint32) bool { return x >= 0 }
+func eq_0_uint32(x uint32) bool { return x == 0 }
+func ne_0_uint32(x uint32) bool { return x != 0 }
+func lt_1_uint32(x uint32) bool { return x < 1 }
+func le_1_uint32(x uint32) bool { return x <= 1 }
+func gt_1_uint32(x uint32) bool { return x > 1 }
+func ge_1_uint32(x uint32) bool { return x >= 1 }
+func eq_1_uint32(x uint32) bool { return x == 1 }
+func ne_1_uint32(x uint32) bool { return x != 1 }
+func lt_126_uint32(x uint32) bool { return x < 126 }
+func le_126_uint32(x uint32) bool { return x <= 126 }
+func gt_126_uint32(x uint32) bool { return x > 126 }
+func ge_126_uint32(x uint32) bool { return x >= 126 }
+func eq_126_uint32(x uint32) bool { return x == 126 }
+func ne_126_uint32(x uint32) bool { return x != 126 }
+func lt_127_uint32(x uint32) bool { return x < 127 }
+func le_127_uint32(x uint32) bool { return x <= 127 }
+func gt_127_uint32(x uint32) bool { return x > 127 }
+func ge_127_uint32(x uint32) bool { return x >= 127 }
+func eq_127_uint32(x uint32) bool { return x == 127 }
+func ne_127_uint32(x uint32) bool { return x != 127 }
+func lt_128_uint32(x uint32) bool { return x < 128 }
+func le_128_uint32(x uint32) bool { return x <= 128 }
+func gt_128_uint32(x uint32) bool { return x > 128 }
+func ge_128_uint32(x uint32) bool { return x >= 128 }
+func eq_128_uint32(x uint32) bool { return x == 128 }
+func ne_128_uint32(x uint32) bool { return x != 128 }
+func lt_254_uint32(x uint32) bool { return x < 254 }
+func le_254_uint32(x uint32) bool { return x <= 254 }
+func gt_254_uint32(x uint32) bool { return x > 254 }
+func ge_254_uint32(x uint32) bool { return x >= 254 }
+func eq_254_uint32(x uint32) bool { return x == 254 }
+func ne_254_uint32(x uint32) bool { return x != 254 }
+func lt_255_uint32(x uint32) bool { return x < 255 }
+func le_255_uint32(x uint32) bool { return x <= 255 }
+func gt_255_uint32(x uint32) bool { return x > 255 }
+func ge_255_uint32(x uint32) bool { return x >= 255 }
+func eq_255_uint32(x uint32) bool { return x == 255 }
+func ne_255_uint32(x uint32) bool { return x != 255 }
+func lt_256_uint32(x uint32) bool { return x < 256 }
+func le_256_uint32(x uint32) bool { return x <= 256 }
+func gt_256_uint32(x uint32) bool { return x > 256 }
+func ge_256_uint32(x uint32) bool { return x >= 256 }
+func eq_256_uint32(x uint32) bool { return x == 256 }
+func ne_256_uint32(x uint32) bool { return x != 256 }
+func lt_32766_uint32(x uint32) bool { return x < 32766 }
+func le_32766_uint32(x uint32) bool { return x <= 32766 }
+func gt_32766_uint32(x uint32) bool { return x > 32766 }
+func ge_32766_uint32(x uint32) bool { return x >= 32766 }
+func eq_32766_uint32(x uint32) bool { return x == 32766 }
+func ne_32766_uint32(x uint32) bool { return x != 32766 }
+func lt_32767_uint32(x uint32) bool { return x < 32767 }
+func le_32767_uint32(x uint32) bool { return x <= 32767 }
+func gt_32767_uint32(x uint32) bool { return x > 32767 }
+func ge_32767_uint32(x uint32) bool { return x >= 32767 }
+func eq_32767_uint32(x uint32) bool { return x == 32767 }
+func ne_32767_uint32(x uint32) bool { return x != 32767 }
+func lt_32768_uint32(x uint32) bool { return x < 32768 }
+func le_32768_uint32(x uint32) bool { return x <= 32768 }
+func gt_32768_uint32(x uint32) bool { return x > 32768 }
+func ge_32768_uint32(x uint32) bool { return x >= 32768 }
+func eq_32768_uint32(x uint32) bool { return x == 32768 }
+func ne_32768_uint32(x uint32) bool { return x != 32768 }
+func lt_65534_uint32(x uint32) bool { return x < 65534 }
+func le_65534_uint32(x uint32) bool { return x <= 65534 }
+func gt_65534_uint32(x uint32) bool { return x > 65534 }
+func ge_65534_uint32(x uint32) bool { return x >= 65534 }
+func eq_65534_uint32(x uint32) bool { return x == 65534 }
+func ne_65534_uint32(x uint32) bool { return x != 65534 }
+func lt_65535_uint32(x uint32) bool { return x < 65535 }
+func le_65535_uint32(x uint32) bool { return x <= 65535 }
+func gt_65535_uint32(x uint32) bool { return x > 65535 }
+func ge_65535_uint32(x uint32) bool { return x >= 65535 }
+func eq_65535_uint32(x uint32) bool { return x == 65535 }
+func ne_65535_uint32(x uint32) bool { return x != 65535 }
+func lt_65536_uint32(x uint32) bool { return x < 65536 }
+func le_65536_uint32(x uint32) bool { return x <= 65536 }
+func gt_65536_uint32(x uint32) bool { return x > 65536 }
+func ge_65536_uint32(x uint32) bool { return x >= 65536 }
+func eq_65536_uint32(x uint32) bool { return x == 65536 }
+func ne_65536_uint32(x uint32) bool { return x != 65536 }
+func lt_2147483646_uint32(x uint32) bool { return x < 2147483646 }
+func le_2147483646_uint32(x uint32) bool { return x <= 2147483646 }
+func gt_2147483646_uint32(x uint32) bool { return x > 2147483646 }
+func ge_2147483646_uint32(x uint32) bool { return x >= 2147483646 }
+func eq_2147483646_uint32(x uint32) bool { return x == 2147483646 }
+func ne_2147483646_uint32(x uint32) bool { return x != 2147483646 }
+func lt_2147483647_uint32(x uint32) bool { return x < 2147483647 }
+func le_2147483647_uint32(x uint32) bool { return x <= 2147483647 }
+func gt_2147483647_uint32(x uint32) bool { return x > 2147483647 }
+func ge_2147483647_uint32(x uint32) bool { return x >= 2147483647 }
+func eq_2147483647_uint32(x uint32) bool { return x == 2147483647 }
+func ne_2147483647_uint32(x uint32) bool { return x != 2147483647 }
+func lt_2147483648_uint32(x uint32) bool { return x < 2147483648 }
+func le_2147483648_uint32(x uint32) bool { return x <= 2147483648 }
+func gt_2147483648_uint32(x uint32) bool { return x > 2147483648 }
+func ge_2147483648_uint32(x uint32) bool { return x >= 2147483648 }
+func eq_2147483648_uint32(x uint32) bool { return x == 2147483648 }
+func ne_2147483648_uint32(x uint32) bool { return x != 2147483648 }
+func lt_4278190080_uint32(x uint32) bool { return x < 4278190080 }
+func le_4278190080_uint32(x uint32) bool { return x <= 4278190080 }
+func gt_4278190080_uint32(x uint32) bool { return x > 4278190080 }
+func ge_4278190080_uint32(x uint32) bool { return x >= 4278190080 }
+func eq_4278190080_uint32(x uint32) bool { return x == 4278190080 }
+func ne_4278190080_uint32(x uint32) bool { return x != 4278190080 }
+func lt_4294967294_uint32(x uint32) bool { return x < 4294967294 }
+func le_4294967294_uint32(x uint32) bool { return x <= 4294967294 }
+func gt_4294967294_uint32(x uint32) bool { return x > 4294967294 }
+func ge_4294967294_uint32(x uint32) bool { return x >= 4294967294 }
+func eq_4294967294_uint32(x uint32) bool { return x == 4294967294 }
+func ne_4294967294_uint32(x uint32) bool { return x != 4294967294 }
+func lt_4294967295_uint32(x uint32) bool { return x < 4294967295 }
+func le_4294967295_uint32(x uint32) bool { return x <= 4294967295 }
+func gt_4294967295_uint32(x uint32) bool { return x > 4294967295 }
+func ge_4294967295_uint32(x uint32) bool { return x >= 4294967295 }
+func eq_4294967295_uint32(x uint32) bool { return x == 4294967295 }
+func ne_4294967295_uint32(x uint32) bool { return x != 4294967295 }
+
+var uint32_tests = []struct {
+ idx int // index into uint32_vals of the constant used
+ exp result // expected results
+ fn func(uint32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint32},
+ {idx: 0, exp: le, fn: le_0_uint32},
+ {idx: 0, exp: gt, fn: gt_0_uint32},
+ {idx: 0, exp: ge, fn: ge_0_uint32},
+ {idx: 0, exp: eq, fn: eq_0_uint32},
+ {idx: 0, exp: ne, fn: ne_0_uint32},
+ {idx: 1, exp: lt, fn: lt_1_uint32},
+ {idx: 1, exp: le, fn: le_1_uint32},
+ {idx: 1, exp: gt, fn: gt_1_uint32},
+ {idx: 1, exp: ge, fn: ge_1_uint32},
+ {idx: 1, exp: eq, fn: eq_1_uint32},
+ {idx: 1, exp: ne, fn: ne_1_uint32},
+ {idx: 2, exp: lt, fn: lt_126_uint32},
+ {idx: 2, exp: le, fn: le_126_uint32},
+ {idx: 2, exp: gt, fn: gt_126_uint32},
+ {idx: 2, exp: ge, fn: ge_126_uint32},
+ {idx: 2, exp: eq, fn: eq_126_uint32},
+ {idx: 2, exp: ne, fn: ne_126_uint32},
+ {idx: 3, exp: lt, fn: lt_127_uint32},
+ {idx: 3, exp: le, fn: le_127_uint32},
+ {idx: 3, exp: gt, fn: gt_127_uint32},
+ {idx: 3, exp: ge, fn: ge_127_uint32},
+ {idx: 3, exp: eq, fn: eq_127_uint32},
+ {idx: 3, exp: ne, fn: ne_127_uint32},
+ {idx: 4, exp: lt, fn: lt_128_uint32},
+ {idx: 4, exp: le, fn: le_128_uint32},
+ {idx: 4, exp: gt, fn: gt_128_uint32},
+ {idx: 4, exp: ge, fn: ge_128_uint32},
+ {idx: 4, exp: eq, fn: eq_128_uint32},
+ {idx: 4, exp: ne, fn: ne_128_uint32},
+ {idx: 5, exp: lt, fn: lt_254_uint32},
+ {idx: 5, exp: le, fn: le_254_uint32},
+ {idx: 5, exp: gt, fn: gt_254_uint32},
+ {idx: 5, exp: ge, fn: ge_254_uint32},
+ {idx: 5, exp: eq, fn: eq_254_uint32},
+ {idx: 5, exp: ne, fn: ne_254_uint32},
+ {idx: 6, exp: lt, fn: lt_255_uint32},
+ {idx: 6, exp: le, fn: le_255_uint32},
+ {idx: 6, exp: gt, fn: gt_255_uint32},
+ {idx: 6, exp: ge, fn: ge_255_uint32},
+ {idx: 6, exp: eq, fn: eq_255_uint32},
+ {idx: 6, exp: ne, fn: ne_255_uint32},
+ {idx: 7, exp: lt, fn: lt_256_uint32},
+ {idx: 7, exp: le, fn: le_256_uint32},
+ {idx: 7, exp: gt, fn: gt_256_uint32},
+ {idx: 7, exp: ge, fn: ge_256_uint32},
+ {idx: 7, exp: eq, fn: eq_256_uint32},
+ {idx: 7, exp: ne, fn: ne_256_uint32},
+ {idx: 8, exp: lt, fn: lt_32766_uint32},
+ {idx: 8, exp: le, fn: le_32766_uint32},
+ {idx: 8, exp: gt, fn: gt_32766_uint32},
+ {idx: 8, exp: ge, fn: ge_32766_uint32},
+ {idx: 8, exp: eq, fn: eq_32766_uint32},
+ {idx: 8, exp: ne, fn: ne_32766_uint32},
+ {idx: 9, exp: lt, fn: lt_32767_uint32},
+ {idx: 9, exp: le, fn: le_32767_uint32},
+ {idx: 9, exp: gt, fn: gt_32767_uint32},
+ {idx: 9, exp: ge, fn: ge_32767_uint32},
+ {idx: 9, exp: eq, fn: eq_32767_uint32},
+ {idx: 9, exp: ne, fn: ne_32767_uint32},
+ {idx: 10, exp: lt, fn: lt_32768_uint32},
+ {idx: 10, exp: le, fn: le_32768_uint32},
+ {idx: 10, exp: gt, fn: gt_32768_uint32},
+ {idx: 10, exp: ge, fn: ge_32768_uint32},
+ {idx: 10, exp: eq, fn: eq_32768_uint32},
+ {idx: 10, exp: ne, fn: ne_32768_uint32},
+ {idx: 11, exp: lt, fn: lt_65534_uint32},
+ {idx: 11, exp: le, fn: le_65534_uint32},
+ {idx: 11, exp: gt, fn: gt_65534_uint32},
+ {idx: 11, exp: ge, fn: ge_65534_uint32},
+ {idx: 11, exp: eq, fn: eq_65534_uint32},
+ {idx: 11, exp: ne, fn: ne_65534_uint32},
+ {idx: 12, exp: lt, fn: lt_65535_uint32},
+ {idx: 12, exp: le, fn: le_65535_uint32},
+ {idx: 12, exp: gt, fn: gt_65535_uint32},
+ {idx: 12, exp: ge, fn: ge_65535_uint32},
+ {idx: 12, exp: eq, fn: eq_65535_uint32},
+ {idx: 12, exp: ne, fn: ne_65535_uint32},
+ {idx: 13, exp: lt, fn: lt_65536_uint32},
+ {idx: 13, exp: le, fn: le_65536_uint32},
+ {idx: 13, exp: gt, fn: gt_65536_uint32},
+ {idx: 13, exp: ge, fn: ge_65536_uint32},
+ {idx: 13, exp: eq, fn: eq_65536_uint32},
+ {idx: 13, exp: ne, fn: ne_65536_uint32},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint32},
+ {idx: 14, exp: le, fn: le_2147483646_uint32},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint32},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint32},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint32},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint32},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint32},
+ {idx: 15, exp: le, fn: le_2147483647_uint32},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint32},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint32},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint32},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint32},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint32},
+ {idx: 16, exp: le, fn: le_2147483648_uint32},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint32},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint32},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint32},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint32},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint32},
+ {idx: 17, exp: le, fn: le_4278190080_uint32},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint32},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint32},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint32},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint32},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint32},
+ {idx: 18, exp: le, fn: le_4294967294_uint32},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint32},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint32},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint32},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint32},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint32},
+ {idx: 19, exp: le, fn: le_4294967295_uint32},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint32},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint32},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint32},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint32},
+}
+
+// uint16 tests
+var uint16_vals = []uint16{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+}
+
+func lt_0_uint16(x uint16) bool { return x < 0 }
+func le_0_uint16(x uint16) bool { return x <= 0 }
+func gt_0_uint16(x uint16) bool { return x > 0 }
+func ge_0_uint16(x uint16) bool { return x >= 0 }
+func eq_0_uint16(x uint16) bool { return x == 0 }
+func ne_0_uint16(x uint16) bool { return x != 0 }
+func lt_1_uint16(x uint16) bool { return x < 1 }
+func le_1_uint16(x uint16) bool { return x <= 1 }
+func gt_1_uint16(x uint16) bool { return x > 1 }
+func ge_1_uint16(x uint16) bool { return x >= 1 }
+func eq_1_uint16(x uint16) bool { return x == 1 }
+func ne_1_uint16(x uint16) bool { return x != 1 }
+func lt_126_uint16(x uint16) bool { return x < 126 }
+func le_126_uint16(x uint16) bool { return x <= 126 }
+func gt_126_uint16(x uint16) bool { return x > 126 }
+func ge_126_uint16(x uint16) bool { return x >= 126 }
+func eq_126_uint16(x uint16) bool { return x == 126 }
+func ne_126_uint16(x uint16) bool { return x != 126 }
+func lt_127_uint16(x uint16) bool { return x < 127 }
+func le_127_uint16(x uint16) bool { return x <= 127 }
+func gt_127_uint16(x uint16) bool { return x > 127 }
+func ge_127_uint16(x uint16) bool { return x >= 127 }
+func eq_127_uint16(x uint16) bool { return x == 127 }
+func ne_127_uint16(x uint16) bool { return x != 127 }
+func lt_128_uint16(x uint16) bool { return x < 128 }
+func le_128_uint16(x uint16) bool { return x <= 128 }
+func gt_128_uint16(x uint16) bool { return x > 128 }
+func ge_128_uint16(x uint16) bool { return x >= 128 }
+func eq_128_uint16(x uint16) bool { return x == 128 }
+func ne_128_uint16(x uint16) bool { return x != 128 }
+func lt_254_uint16(x uint16) bool { return x < 254 }
+func le_254_uint16(x uint16) bool { return x <= 254 }
+func gt_254_uint16(x uint16) bool { return x > 254 }
+func ge_254_uint16(x uint16) bool { return x >= 254 }
+func eq_254_uint16(x uint16) bool { return x == 254 }
+func ne_254_uint16(x uint16) bool { return x != 254 }
+func lt_255_uint16(x uint16) bool { return x < 255 }
+func le_255_uint16(x uint16) bool { return x <= 255 }
+func gt_255_uint16(x uint16) bool { return x > 255 }
+func ge_255_uint16(x uint16) bool { return x >= 255 }
+func eq_255_uint16(x uint16) bool { return x == 255 }
+func ne_255_uint16(x uint16) bool { return x != 255 }
+func lt_256_uint16(x uint16) bool { return x < 256 }
+func le_256_uint16(x uint16) bool { return x <= 256 }
+func gt_256_uint16(x uint16) bool { return x > 256 }
+func ge_256_uint16(x uint16) bool { return x >= 256 }
+func eq_256_uint16(x uint16) bool { return x == 256 }
+func ne_256_uint16(x uint16) bool { return x != 256 }
+func lt_32766_uint16(x uint16) bool { return x < 32766 }
+func le_32766_uint16(x uint16) bool { return x <= 32766 }
+func gt_32766_uint16(x uint16) bool { return x > 32766 }
+func ge_32766_uint16(x uint16) bool { return x >= 32766 }
+func eq_32766_uint16(x uint16) bool { return x == 32766 }
+func ne_32766_uint16(x uint16) bool { return x != 32766 }
+func lt_32767_uint16(x uint16) bool { return x < 32767 }
+func le_32767_uint16(x uint16) bool { return x <= 32767 }
+func gt_32767_uint16(x uint16) bool { return x > 32767 }
+func ge_32767_uint16(x uint16) bool { return x >= 32767 }
+func eq_32767_uint16(x uint16) bool { return x == 32767 }
+func ne_32767_uint16(x uint16) bool { return x != 32767 }
+func lt_32768_uint16(x uint16) bool { return x < 32768 }
+func le_32768_uint16(x uint16) bool { return x <= 32768 }
+func gt_32768_uint16(x uint16) bool { return x > 32768 }
+func ge_32768_uint16(x uint16) bool { return x >= 32768 }
+func eq_32768_uint16(x uint16) bool { return x == 32768 }
+func ne_32768_uint16(x uint16) bool { return x != 32768 }
+func lt_65534_uint16(x uint16) bool { return x < 65534 }
+func le_65534_uint16(x uint16) bool { return x <= 65534 }
+func gt_65534_uint16(x uint16) bool { return x > 65534 }
+func ge_65534_uint16(x uint16) bool { return x >= 65534 }
+func eq_65534_uint16(x uint16) bool { return x == 65534 }
+func ne_65534_uint16(x uint16) bool { return x != 65534 }
+func lt_65535_uint16(x uint16) bool { return x < 65535 }
+func le_65535_uint16(x uint16) bool { return x <= 65535 }
+func gt_65535_uint16(x uint16) bool { return x > 65535 }
+func ge_65535_uint16(x uint16) bool { return x >= 65535 }
+func eq_65535_uint16(x uint16) bool { return x == 65535 }
+func ne_65535_uint16(x uint16) bool { return x != 65535 }
+
+var uint16_tests = []struct {
+ idx int // index into uint16_vals of the constant used
+ exp result // expected results
+ fn func(uint16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint16},
+ {idx: 0, exp: le, fn: le_0_uint16},
+ {idx: 0, exp: gt, fn: gt_0_uint16},
+ {idx: 0, exp: ge, fn: ge_0_uint16},
+ {idx: 0, exp: eq, fn: eq_0_uint16},
+ {idx: 0, exp: ne, fn: ne_0_uint16},
+ {idx: 1, exp: lt, fn: lt_1_uint16},
+ {idx: 1, exp: le, fn: le_1_uint16},
+ {idx: 1, exp: gt, fn: gt_1_uint16},
+ {idx: 1, exp: ge, fn: ge_1_uint16},
+ {idx: 1, exp: eq, fn: eq_1_uint16},
+ {idx: 1, exp: ne, fn: ne_1_uint16},
+ {idx: 2, exp: lt, fn: lt_126_uint16},
+ {idx: 2, exp: le, fn: le_126_uint16},
+ {idx: 2, exp: gt, fn: gt_126_uint16},
+ {idx: 2, exp: ge, fn: ge_126_uint16},
+ {idx: 2, exp: eq, fn: eq_126_uint16},
+ {idx: 2, exp: ne, fn: ne_126_uint16},
+ {idx: 3, exp: lt, fn: lt_127_uint16},
+ {idx: 3, exp: le, fn: le_127_uint16},
+ {idx: 3, exp: gt, fn: gt_127_uint16},
+ {idx: 3, exp: ge, fn: ge_127_uint16},
+ {idx: 3, exp: eq, fn: eq_127_uint16},
+ {idx: 3, exp: ne, fn: ne_127_uint16},
+ {idx: 4, exp: lt, fn: lt_128_uint16},
+ {idx: 4, exp: le, fn: le_128_uint16},
+ {idx: 4, exp: gt, fn: gt_128_uint16},
+ {idx: 4, exp: ge, fn: ge_128_uint16},
+ {idx: 4, exp: eq, fn: eq_128_uint16},
+ {idx: 4, exp: ne, fn: ne_128_uint16},
+ {idx: 5, exp: lt, fn: lt_254_uint16},
+ {idx: 5, exp: le, fn: le_254_uint16},
+ {idx: 5, exp: gt, fn: gt_254_uint16},
+ {idx: 5, exp: ge, fn: ge_254_uint16},
+ {idx: 5, exp: eq, fn: eq_254_uint16},
+ {idx: 5, exp: ne, fn: ne_254_uint16},
+ {idx: 6, exp: lt, fn: lt_255_uint16},
+ {idx: 6, exp: le, fn: le_255_uint16},
+ {idx: 6, exp: gt, fn: gt_255_uint16},
+ {idx: 6, exp: ge, fn: ge_255_uint16},
+ {idx: 6, exp: eq, fn: eq_255_uint16},
+ {idx: 6, exp: ne, fn: ne_255_uint16},
+ {idx: 7, exp: lt, fn: lt_256_uint16},
+ {idx: 7, exp: le, fn: le_256_uint16},
+ {idx: 7, exp: gt, fn: gt_256_uint16},
+ {idx: 7, exp: ge, fn: ge_256_uint16},
+ {idx: 7, exp: eq, fn: eq_256_uint16},
+ {idx: 7, exp: ne, fn: ne_256_uint16},
+ {idx: 8, exp: lt, fn: lt_32766_uint16},
+ {idx: 8, exp: le, fn: le_32766_uint16},
+ {idx: 8, exp: gt, fn: gt_32766_uint16},
+ {idx: 8, exp: ge, fn: ge_32766_uint16},
+ {idx: 8, exp: eq, fn: eq_32766_uint16},
+ {idx: 8, exp: ne, fn: ne_32766_uint16},
+ {idx: 9, exp: lt, fn: lt_32767_uint16},
+ {idx: 9, exp: le, fn: le_32767_uint16},
+ {idx: 9, exp: gt, fn: gt_32767_uint16},
+ {idx: 9, exp: ge, fn: ge_32767_uint16},
+ {idx: 9, exp: eq, fn: eq_32767_uint16},
+ {idx: 9, exp: ne, fn: ne_32767_uint16},
+ {idx: 10, exp: lt, fn: lt_32768_uint16},
+ {idx: 10, exp: le, fn: le_32768_uint16},
+ {idx: 10, exp: gt, fn: gt_32768_uint16},
+ {idx: 10, exp: ge, fn: ge_32768_uint16},
+ {idx: 10, exp: eq, fn: eq_32768_uint16},
+ {idx: 10, exp: ne, fn: ne_32768_uint16},
+ {idx: 11, exp: lt, fn: lt_65534_uint16},
+ {idx: 11, exp: le, fn: le_65534_uint16},
+ {idx: 11, exp: gt, fn: gt_65534_uint16},
+ {idx: 11, exp: ge, fn: ge_65534_uint16},
+ {idx: 11, exp: eq, fn: eq_65534_uint16},
+ {idx: 11, exp: ne, fn: ne_65534_uint16},
+ {idx: 12, exp: lt, fn: lt_65535_uint16},
+ {idx: 12, exp: le, fn: le_65535_uint16},
+ {idx: 12, exp: gt, fn: gt_65535_uint16},
+ {idx: 12, exp: ge, fn: ge_65535_uint16},
+ {idx: 12, exp: eq, fn: eq_65535_uint16},
+ {idx: 12, exp: ne, fn: ne_65535_uint16},
+}
+
+// uint8 tests
+var uint8_vals = []uint8{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+}
+
+func lt_0_uint8(x uint8) bool { return x < 0 }
+func le_0_uint8(x uint8) bool { return x <= 0 }
+func gt_0_uint8(x uint8) bool { return x > 0 }
+func ge_0_uint8(x uint8) bool { return x >= 0 }
+func eq_0_uint8(x uint8) bool { return x == 0 }
+func ne_0_uint8(x uint8) bool { return x != 0 }
+func lt_1_uint8(x uint8) bool { return x < 1 }
+func le_1_uint8(x uint8) bool { return x <= 1 }
+func gt_1_uint8(x uint8) bool { return x > 1 }
+func ge_1_uint8(x uint8) bool { return x >= 1 }
+func eq_1_uint8(x uint8) bool { return x == 1 }
+func ne_1_uint8(x uint8) bool { return x != 1 }
+func lt_126_uint8(x uint8) bool { return x < 126 }
+func le_126_uint8(x uint8) bool { return x <= 126 }
+func gt_126_uint8(x uint8) bool { return x > 126 }
+func ge_126_uint8(x uint8) bool { return x >= 126 }
+func eq_126_uint8(x uint8) bool { return x == 126 }
+func ne_126_uint8(x uint8) bool { return x != 126 }
+func lt_127_uint8(x uint8) bool { return x < 127 }
+func le_127_uint8(x uint8) bool { return x <= 127 }
+func gt_127_uint8(x uint8) bool { return x > 127 }
+func ge_127_uint8(x uint8) bool { return x >= 127 }
+func eq_127_uint8(x uint8) bool { return x == 127 }
+func ne_127_uint8(x uint8) bool { return x != 127 }
+func lt_128_uint8(x uint8) bool { return x < 128 }
+func le_128_uint8(x uint8) bool { return x <= 128 }
+func gt_128_uint8(x uint8) bool { return x > 128 }
+func ge_128_uint8(x uint8) bool { return x >= 128 }
+func eq_128_uint8(x uint8) bool { return x == 128 }
+func ne_128_uint8(x uint8) bool { return x != 128 }
+func lt_254_uint8(x uint8) bool { return x < 254 }
+func le_254_uint8(x uint8) bool { return x <= 254 }
+func gt_254_uint8(x uint8) bool { return x > 254 }
+func ge_254_uint8(x uint8) bool { return x >= 254 }
+func eq_254_uint8(x uint8) bool { return x == 254 }
+func ne_254_uint8(x uint8) bool { return x != 254 }
+func lt_255_uint8(x uint8) bool { return x < 255 }
+func le_255_uint8(x uint8) bool { return x <= 255 }
+func gt_255_uint8(x uint8) bool { return x > 255 }
+func ge_255_uint8(x uint8) bool { return x >= 255 }
+func eq_255_uint8(x uint8) bool { return x == 255 }
+func ne_255_uint8(x uint8) bool { return x != 255 }
+
+var uint8_tests = []struct {
+ idx int // index into uint8_vals of the constant used
+ exp result // expected results
+ fn func(uint8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint8},
+ {idx: 0, exp: le, fn: le_0_uint8},
+ {idx: 0, exp: gt, fn: gt_0_uint8},
+ {idx: 0, exp: ge, fn: ge_0_uint8},
+ {idx: 0, exp: eq, fn: eq_0_uint8},
+ {idx: 0, exp: ne, fn: ne_0_uint8},
+ {idx: 1, exp: lt, fn: lt_1_uint8},
+ {idx: 1, exp: le, fn: le_1_uint8},
+ {idx: 1, exp: gt, fn: gt_1_uint8},
+ {idx: 1, exp: ge, fn: ge_1_uint8},
+ {idx: 1, exp: eq, fn: eq_1_uint8},
+ {idx: 1, exp: ne, fn: ne_1_uint8},
+ {idx: 2, exp: lt, fn: lt_126_uint8},
+ {idx: 2, exp: le, fn: le_126_uint8},
+ {idx: 2, exp: gt, fn: gt_126_uint8},
+ {idx: 2, exp: ge, fn: ge_126_uint8},
+ {idx: 2, exp: eq, fn: eq_126_uint8},
+ {idx: 2, exp: ne, fn: ne_126_uint8},
+ {idx: 3, exp: lt, fn: lt_127_uint8},
+ {idx: 3, exp: le, fn: le_127_uint8},
+ {idx: 3, exp: gt, fn: gt_127_uint8},
+ {idx: 3, exp: ge, fn: ge_127_uint8},
+ {idx: 3, exp: eq, fn: eq_127_uint8},
+ {idx: 3, exp: ne, fn: ne_127_uint8},
+ {idx: 4, exp: lt, fn: lt_128_uint8},
+ {idx: 4, exp: le, fn: le_128_uint8},
+ {idx: 4, exp: gt, fn: gt_128_uint8},
+ {idx: 4, exp: ge, fn: ge_128_uint8},
+ {idx: 4, exp: eq, fn: eq_128_uint8},
+ {idx: 4, exp: ne, fn: ne_128_uint8},
+ {idx: 5, exp: lt, fn: lt_254_uint8},
+ {idx: 5, exp: le, fn: le_254_uint8},
+ {idx: 5, exp: gt, fn: gt_254_uint8},
+ {idx: 5, exp: ge, fn: ge_254_uint8},
+ {idx: 5, exp: eq, fn: eq_254_uint8},
+ {idx: 5, exp: ne, fn: ne_254_uint8},
+ {idx: 6, exp: lt, fn: lt_255_uint8},
+ {idx: 6, exp: le, fn: le_255_uint8},
+ {idx: 6, exp: gt, fn: gt_255_uint8},
+ {idx: 6, exp: ge, fn: ge_255_uint8},
+ {idx: 6, exp: eq, fn: eq_255_uint8},
+ {idx: 6, exp: ne, fn: ne_255_uint8},
+}
+
+// int64 tests
+var int64_vals = []int64{
+ -9223372036854775808,
+ -9223372036854775807,
+ -2147483649,
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+}
+
+func lt_neg9223372036854775808_int64(x int64) bool { return x < -9223372036854775808 }
+func le_neg9223372036854775808_int64(x int64) bool { return x <= -9223372036854775808 }
+func gt_neg9223372036854775808_int64(x int64) bool { return x > -9223372036854775808 }
+func ge_neg9223372036854775808_int64(x int64) bool { return x >= -9223372036854775808 }
+func eq_neg9223372036854775808_int64(x int64) bool { return x == -9223372036854775808 }
+func ne_neg9223372036854775808_int64(x int64) bool { return x != -9223372036854775808 }
+func lt_neg9223372036854775807_int64(x int64) bool { return x < -9223372036854775807 }
+func le_neg9223372036854775807_int64(x int64) bool { return x <= -9223372036854775807 }
+func gt_neg9223372036854775807_int64(x int64) bool { return x > -9223372036854775807 }
+func ge_neg9223372036854775807_int64(x int64) bool { return x >= -9223372036854775807 }
+func eq_neg9223372036854775807_int64(x int64) bool { return x == -9223372036854775807 }
+func ne_neg9223372036854775807_int64(x int64) bool { return x != -9223372036854775807 }
+func lt_neg2147483649_int64(x int64) bool { return x < -2147483649 }
+func le_neg2147483649_int64(x int64) bool { return x <= -2147483649 }
+func gt_neg2147483649_int64(x int64) bool { return x > -2147483649 }
+func ge_neg2147483649_int64(x int64) bool { return x >= -2147483649 }
+func eq_neg2147483649_int64(x int64) bool { return x == -2147483649 }
+func ne_neg2147483649_int64(x int64) bool { return x != -2147483649 }
+func lt_neg2147483648_int64(x int64) bool { return x < -2147483648 }
+func le_neg2147483648_int64(x int64) bool { return x <= -2147483648 }
+func gt_neg2147483648_int64(x int64) bool { return x > -2147483648 }
+func ge_neg2147483648_int64(x int64) bool { return x >= -2147483648 }
+func eq_neg2147483648_int64(x int64) bool { return x == -2147483648 }
+func ne_neg2147483648_int64(x int64) bool { return x != -2147483648 }
+func lt_neg2147483647_int64(x int64) bool { return x < -2147483647 }
+func le_neg2147483647_int64(x int64) bool { return x <= -2147483647 }
+func gt_neg2147483647_int64(x int64) bool { return x > -2147483647 }
+func ge_neg2147483647_int64(x int64) bool { return x >= -2147483647 }
+func eq_neg2147483647_int64(x int64) bool { return x == -2147483647 }
+func ne_neg2147483647_int64(x int64) bool { return x != -2147483647 }
+func lt_neg32769_int64(x int64) bool { return x < -32769 }
+func le_neg32769_int64(x int64) bool { return x <= -32769 }
+func gt_neg32769_int64(x int64) bool { return x > -32769 }
+func ge_neg32769_int64(x int64) bool { return x >= -32769 }
+func eq_neg32769_int64(x int64) bool { return x == -32769 }
+func ne_neg32769_int64(x int64) bool { return x != -32769 }
+func lt_neg32768_int64(x int64) bool { return x < -32768 }
+func le_neg32768_int64(x int64) bool { return x <= -32768 }
+func gt_neg32768_int64(x int64) bool { return x > -32768 }
+func ge_neg32768_int64(x int64) bool { return x >= -32768 }
+func eq_neg32768_int64(x int64) bool { return x == -32768 }
+func ne_neg32768_int64(x int64) bool { return x != -32768 }
+func lt_neg32767_int64(x int64) bool { return x < -32767 }
+func le_neg32767_int64(x int64) bool { return x <= -32767 }
+func gt_neg32767_int64(x int64) bool { return x > -32767 }
+func ge_neg32767_int64(x int64) bool { return x >= -32767 }
+func eq_neg32767_int64(x int64) bool { return x == -32767 }
+func ne_neg32767_int64(x int64) bool { return x != -32767 }
+func lt_neg129_int64(x int64) bool { return x < -129 }
+func le_neg129_int64(x int64) bool { return x <= -129 }
+func gt_neg129_int64(x int64) bool { return x > -129 }
+func ge_neg129_int64(x int64) bool { return x >= -129 }
+func eq_neg129_int64(x int64) bool { return x == -129 }
+func ne_neg129_int64(x int64) bool { return x != -129 }
+func lt_neg128_int64(x int64) bool { return x < -128 }
+func le_neg128_int64(x int64) bool { return x <= -128 }
+func gt_neg128_int64(x int64) bool { return x > -128 }
+func ge_neg128_int64(x int64) bool { return x >= -128 }
+func eq_neg128_int64(x int64) bool { return x == -128 }
+func ne_neg128_int64(x int64) bool { return x != -128 }
+func lt_neg127_int64(x int64) bool { return x < -127 }
+func le_neg127_int64(x int64) bool { return x <= -127 }
+func gt_neg127_int64(x int64) bool { return x > -127 }
+func ge_neg127_int64(x int64) bool { return x >= -127 }
+func eq_neg127_int64(x int64) bool { return x == -127 }
+func ne_neg127_int64(x int64) bool { return x != -127 }
+func lt_neg1_int64(x int64) bool { return x < -1 }
+func le_neg1_int64(x int64) bool { return x <= -1 }
+func gt_neg1_int64(x int64) bool { return x > -1 }
+func ge_neg1_int64(x int64) bool { return x >= -1 }
+func eq_neg1_int64(x int64) bool { return x == -1 }
+func ne_neg1_int64(x int64) bool { return x != -1 }
+func lt_0_int64(x int64) bool { return x < 0 }
+func le_0_int64(x int64) bool { return x <= 0 }
+func gt_0_int64(x int64) bool { return x > 0 }
+func ge_0_int64(x int64) bool { return x >= 0 }
+func eq_0_int64(x int64) bool { return x == 0 }
+func ne_0_int64(x int64) bool { return x != 0 }
+func lt_1_int64(x int64) bool { return x < 1 }
+func le_1_int64(x int64) bool { return x <= 1 }
+func gt_1_int64(x int64) bool { return x > 1 }
+func ge_1_int64(x int64) bool { return x >= 1 }
+func eq_1_int64(x int64) bool { return x == 1 }
+func ne_1_int64(x int64) bool { return x != 1 }
+func lt_126_int64(x int64) bool { return x < 126 }
+func le_126_int64(x int64) bool { return x <= 126 }
+func gt_126_int64(x int64) bool { return x > 126 }
+func ge_126_int64(x int64) bool { return x >= 126 }
+func eq_126_int64(x int64) bool { return x == 126 }
+func ne_126_int64(x int64) bool { return x != 126 }
+func lt_127_int64(x int64) bool { return x < 127 }
+func le_127_int64(x int64) bool { return x <= 127 }
+func gt_127_int64(x int64) bool { return x > 127 }
+func ge_127_int64(x int64) bool { return x >= 127 }
+func eq_127_int64(x int64) bool { return x == 127 }
+func ne_127_int64(x int64) bool { return x != 127 }
+func lt_128_int64(x int64) bool { return x < 128 }
+func le_128_int64(x int64) bool { return x <= 128 }
+func gt_128_int64(x int64) bool { return x > 128 }
+func ge_128_int64(x int64) bool { return x >= 128 }
+func eq_128_int64(x int64) bool { return x == 128 }
+func ne_128_int64(x int64) bool { return x != 128 }
+func lt_254_int64(x int64) bool { return x < 254 }
+func le_254_int64(x int64) bool { return x <= 254 }
+func gt_254_int64(x int64) bool { return x > 254 }
+func ge_254_int64(x int64) bool { return x >= 254 }
+func eq_254_int64(x int64) bool { return x == 254 }
+func ne_254_int64(x int64) bool { return x != 254 }
+func lt_255_int64(x int64) bool { return x < 255 }
+func le_255_int64(x int64) bool { return x <= 255 }
+func gt_255_int64(x int64) bool { return x > 255 }
+func ge_255_int64(x int64) bool { return x >= 255 }
+func eq_255_int64(x int64) bool { return x == 255 }
+func ne_255_int64(x int64) bool { return x != 255 }
+func lt_256_int64(x int64) bool { return x < 256 }
+func le_256_int64(x int64) bool { return x <= 256 }
+func gt_256_int64(x int64) bool { return x > 256 }
+func ge_256_int64(x int64) bool { return x >= 256 }
+func eq_256_int64(x int64) bool { return x == 256 }
+func ne_256_int64(x int64) bool { return x != 256 }
+func lt_32766_int64(x int64) bool { return x < 32766 }
+func le_32766_int64(x int64) bool { return x <= 32766 }
+func gt_32766_int64(x int64) bool { return x > 32766 }
+func ge_32766_int64(x int64) bool { return x >= 32766 }
+func eq_32766_int64(x int64) bool { return x == 32766 }
+func ne_32766_int64(x int64) bool { return x != 32766 }
+func lt_32767_int64(x int64) bool { return x < 32767 }
+func le_32767_int64(x int64) bool { return x <= 32767 }
+func gt_32767_int64(x int64) bool { return x > 32767 }
+func ge_32767_int64(x int64) bool { return x >= 32767 }
+func eq_32767_int64(x int64) bool { return x == 32767 }
+func ne_32767_int64(x int64) bool { return x != 32767 }
+func lt_32768_int64(x int64) bool { return x < 32768 }
+func le_32768_int64(x int64) bool { return x <= 32768 }
+func gt_32768_int64(x int64) bool { return x > 32768 }
+func ge_32768_int64(x int64) bool { return x >= 32768 }
+func eq_32768_int64(x int64) bool { return x == 32768 }
+func ne_32768_int64(x int64) bool { return x != 32768 }
+func lt_65534_int64(x int64) bool { return x < 65534 }
+func le_65534_int64(x int64) bool { return x <= 65534 }
+func gt_65534_int64(x int64) bool { return x > 65534 }
+func ge_65534_int64(x int64) bool { return x >= 65534 }
+func eq_65534_int64(x int64) bool { return x == 65534 }
+func ne_65534_int64(x int64) bool { return x != 65534 }
+func lt_65535_int64(x int64) bool { return x < 65535 }
+func le_65535_int64(x int64) bool { return x <= 65535 }
+func gt_65535_int64(x int64) bool { return x > 65535 }
+func ge_65535_int64(x int64) bool { return x >= 65535 }
+func eq_65535_int64(x int64) bool { return x == 65535 }
+func ne_65535_int64(x int64) bool { return x != 65535 }
+func lt_65536_int64(x int64) bool { return x < 65536 }
+func le_65536_int64(x int64) bool { return x <= 65536 }
+func gt_65536_int64(x int64) bool { return x > 65536 }
+func ge_65536_int64(x int64) bool { return x >= 65536 }
+func eq_65536_int64(x int64) bool { return x == 65536 }
+func ne_65536_int64(x int64) bool { return x != 65536 }
+func lt_2147483646_int64(x int64) bool { return x < 2147483646 }
+func le_2147483646_int64(x int64) bool { return x <= 2147483646 }
+func gt_2147483646_int64(x int64) bool { return x > 2147483646 }
+func ge_2147483646_int64(x int64) bool { return x >= 2147483646 }
+func eq_2147483646_int64(x int64) bool { return x == 2147483646 }
+func ne_2147483646_int64(x int64) bool { return x != 2147483646 }
+func lt_2147483647_int64(x int64) bool { return x < 2147483647 }
+func le_2147483647_int64(x int64) bool { return x <= 2147483647 }
+func gt_2147483647_int64(x int64) bool { return x > 2147483647 }
+func ge_2147483647_int64(x int64) bool { return x >= 2147483647 }
+func eq_2147483647_int64(x int64) bool { return x == 2147483647 }
+func ne_2147483647_int64(x int64) bool { return x != 2147483647 }
+func lt_2147483648_int64(x int64) bool { return x < 2147483648 }
+func le_2147483648_int64(x int64) bool { return x <= 2147483648 }
+func gt_2147483648_int64(x int64) bool { return x > 2147483648 }
+func ge_2147483648_int64(x int64) bool { return x >= 2147483648 }
+func eq_2147483648_int64(x int64) bool { return x == 2147483648 }
+func ne_2147483648_int64(x int64) bool { return x != 2147483648 }
+func lt_4278190080_int64(x int64) bool { return x < 4278190080 }
+func le_4278190080_int64(x int64) bool { return x <= 4278190080 }
+func gt_4278190080_int64(x int64) bool { return x > 4278190080 }
+func ge_4278190080_int64(x int64) bool { return x >= 4278190080 }
+func eq_4278190080_int64(x int64) bool { return x == 4278190080 }
+func ne_4278190080_int64(x int64) bool { return x != 4278190080 }
+func lt_4294967294_int64(x int64) bool { return x < 4294967294 }
+func le_4294967294_int64(x int64) bool { return x <= 4294967294 }
+func gt_4294967294_int64(x int64) bool { return x > 4294967294 }
+func ge_4294967294_int64(x int64) bool { return x >= 4294967294 }
+func eq_4294967294_int64(x int64) bool { return x == 4294967294 }
+func ne_4294967294_int64(x int64) bool { return x != 4294967294 }
+func lt_4294967295_int64(x int64) bool { return x < 4294967295 }
+func le_4294967295_int64(x int64) bool { return x <= 4294967295 }
+func gt_4294967295_int64(x int64) bool { return x > 4294967295 }
+func ge_4294967295_int64(x int64) bool { return x >= 4294967295 }
+func eq_4294967295_int64(x int64) bool { return x == 4294967295 }
+func ne_4294967295_int64(x int64) bool { return x != 4294967295 }
+func lt_4294967296_int64(x int64) bool { return x < 4294967296 }
+func le_4294967296_int64(x int64) bool { return x <= 4294967296 }
+func gt_4294967296_int64(x int64) bool { return x > 4294967296 }
+func ge_4294967296_int64(x int64) bool { return x >= 4294967296 }
+func eq_4294967296_int64(x int64) bool { return x == 4294967296 }
+func ne_4294967296_int64(x int64) bool { return x != 4294967296 }
+func lt_1095216660480_int64(x int64) bool { return x < 1095216660480 }
+func le_1095216660480_int64(x int64) bool { return x <= 1095216660480 }
+func gt_1095216660480_int64(x int64) bool { return x > 1095216660480 }
+func ge_1095216660480_int64(x int64) bool { return x >= 1095216660480 }
+func eq_1095216660480_int64(x int64) bool { return x == 1095216660480 }
+func ne_1095216660480_int64(x int64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_int64(x int64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_int64(x int64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_int64(x int64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_int64(x int64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_int64(x int64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_int64(x int64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_int64(x int64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_int64(x int64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_int64(x int64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_int64(x int64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_int64(x int64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_int64(x int64) bool { return x != 9223372036854775807 }
+
+var int64_tests = []struct {
+ idx int // index into int64_vals of the constant used
+ exp result // expected results
+ fn func(int64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg9223372036854775808_int64},
+ {idx: 0, exp: le, fn: le_neg9223372036854775808_int64},
+ {idx: 0, exp: gt, fn: gt_neg9223372036854775808_int64},
+ {idx: 0, exp: ge, fn: ge_neg9223372036854775808_int64},
+ {idx: 0, exp: eq, fn: eq_neg9223372036854775808_int64},
+ {idx: 0, exp: ne, fn: ne_neg9223372036854775808_int64},
+ {idx: 1, exp: lt, fn: lt_neg9223372036854775807_int64},
+ {idx: 1, exp: le, fn: le_neg9223372036854775807_int64},
+ {idx: 1, exp: gt, fn: gt_neg9223372036854775807_int64},
+ {idx: 1, exp: ge, fn: ge_neg9223372036854775807_int64},
+ {idx: 1, exp: eq, fn: eq_neg9223372036854775807_int64},
+ {idx: 1, exp: ne, fn: ne_neg9223372036854775807_int64},
+ {idx: 2, exp: lt, fn: lt_neg2147483649_int64},
+ {idx: 2, exp: le, fn: le_neg2147483649_int64},
+ {idx: 2, exp: gt, fn: gt_neg2147483649_int64},
+ {idx: 2, exp: ge, fn: ge_neg2147483649_int64},
+ {idx: 2, exp: eq, fn: eq_neg2147483649_int64},
+ {idx: 2, exp: ne, fn: ne_neg2147483649_int64},
+ {idx: 3, exp: lt, fn: lt_neg2147483648_int64},
+ {idx: 3, exp: le, fn: le_neg2147483648_int64},
+ {idx: 3, exp: gt, fn: gt_neg2147483648_int64},
+ {idx: 3, exp: ge, fn: ge_neg2147483648_int64},
+ {idx: 3, exp: eq, fn: eq_neg2147483648_int64},
+ {idx: 3, exp: ne, fn: ne_neg2147483648_int64},
+ {idx: 4, exp: lt, fn: lt_neg2147483647_int64},
+ {idx: 4, exp: le, fn: le_neg2147483647_int64},
+ {idx: 4, exp: gt, fn: gt_neg2147483647_int64},
+ {idx: 4, exp: ge, fn: ge_neg2147483647_int64},
+ {idx: 4, exp: eq, fn: eq_neg2147483647_int64},
+ {idx: 4, exp: ne, fn: ne_neg2147483647_int64},
+ {idx: 5, exp: lt, fn: lt_neg32769_int64},
+ {idx: 5, exp: le, fn: le_neg32769_int64},
+ {idx: 5, exp: gt, fn: gt_neg32769_int64},
+ {idx: 5, exp: ge, fn: ge_neg32769_int64},
+ {idx: 5, exp: eq, fn: eq_neg32769_int64},
+ {idx: 5, exp: ne, fn: ne_neg32769_int64},
+ {idx: 6, exp: lt, fn: lt_neg32768_int64},
+ {idx: 6, exp: le, fn: le_neg32768_int64},
+ {idx: 6, exp: gt, fn: gt_neg32768_int64},
+ {idx: 6, exp: ge, fn: ge_neg32768_int64},
+ {idx: 6, exp: eq, fn: eq_neg32768_int64},
+ {idx: 6, exp: ne, fn: ne_neg32768_int64},
+ {idx: 7, exp: lt, fn: lt_neg32767_int64},
+ {idx: 7, exp: le, fn: le_neg32767_int64},
+ {idx: 7, exp: gt, fn: gt_neg32767_int64},
+ {idx: 7, exp: ge, fn: ge_neg32767_int64},
+ {idx: 7, exp: eq, fn: eq_neg32767_int64},
+ {idx: 7, exp: ne, fn: ne_neg32767_int64},
+ {idx: 8, exp: lt, fn: lt_neg129_int64},
+ {idx: 8, exp: le, fn: le_neg129_int64},
+ {idx: 8, exp: gt, fn: gt_neg129_int64},
+ {idx: 8, exp: ge, fn: ge_neg129_int64},
+ {idx: 8, exp: eq, fn: eq_neg129_int64},
+ {idx: 8, exp: ne, fn: ne_neg129_int64},
+ {idx: 9, exp: lt, fn: lt_neg128_int64},
+ {idx: 9, exp: le, fn: le_neg128_int64},
+ {idx: 9, exp: gt, fn: gt_neg128_int64},
+ {idx: 9, exp: ge, fn: ge_neg128_int64},
+ {idx: 9, exp: eq, fn: eq_neg128_int64},
+ {idx: 9, exp: ne, fn: ne_neg128_int64},
+ {idx: 10, exp: lt, fn: lt_neg127_int64},
+ {idx: 10, exp: le, fn: le_neg127_int64},
+ {idx: 10, exp: gt, fn: gt_neg127_int64},
+ {idx: 10, exp: ge, fn: ge_neg127_int64},
+ {idx: 10, exp: eq, fn: eq_neg127_int64},
+ {idx: 10, exp: ne, fn: ne_neg127_int64},
+ {idx: 11, exp: lt, fn: lt_neg1_int64},
+ {idx: 11, exp: le, fn: le_neg1_int64},
+ {idx: 11, exp: gt, fn: gt_neg1_int64},
+ {idx: 11, exp: ge, fn: ge_neg1_int64},
+ {idx: 11, exp: eq, fn: eq_neg1_int64},
+ {idx: 11, exp: ne, fn: ne_neg1_int64},
+ {idx: 12, exp: lt, fn: lt_0_int64},
+ {idx: 12, exp: le, fn: le_0_int64},
+ {idx: 12, exp: gt, fn: gt_0_int64},
+ {idx: 12, exp: ge, fn: ge_0_int64},
+ {idx: 12, exp: eq, fn: eq_0_int64},
+ {idx: 12, exp: ne, fn: ne_0_int64},
+ {idx: 13, exp: lt, fn: lt_1_int64},
+ {idx: 13, exp: le, fn: le_1_int64},
+ {idx: 13, exp: gt, fn: gt_1_int64},
+ {idx: 13, exp: ge, fn: ge_1_int64},
+ {idx: 13, exp: eq, fn: eq_1_int64},
+ {idx: 13, exp: ne, fn: ne_1_int64},
+ {idx: 14, exp: lt, fn: lt_126_int64},
+ {idx: 14, exp: le, fn: le_126_int64},
+ {idx: 14, exp: gt, fn: gt_126_int64},
+ {idx: 14, exp: ge, fn: ge_126_int64},
+ {idx: 14, exp: eq, fn: eq_126_int64},
+ {idx: 14, exp: ne, fn: ne_126_int64},
+ {idx: 15, exp: lt, fn: lt_127_int64},
+ {idx: 15, exp: le, fn: le_127_int64},
+ {idx: 15, exp: gt, fn: gt_127_int64},
+ {idx: 15, exp: ge, fn: ge_127_int64},
+ {idx: 15, exp: eq, fn: eq_127_int64},
+ {idx: 15, exp: ne, fn: ne_127_int64},
+ {idx: 16, exp: lt, fn: lt_128_int64},
+ {idx: 16, exp: le, fn: le_128_int64},
+ {idx: 16, exp: gt, fn: gt_128_int64},
+ {idx: 16, exp: ge, fn: ge_128_int64},
+ {idx: 16, exp: eq, fn: eq_128_int64},
+ {idx: 16, exp: ne, fn: ne_128_int64},
+ {idx: 17, exp: lt, fn: lt_254_int64},
+ {idx: 17, exp: le, fn: le_254_int64},
+ {idx: 17, exp: gt, fn: gt_254_int64},
+ {idx: 17, exp: ge, fn: ge_254_int64},
+ {idx: 17, exp: eq, fn: eq_254_int64},
+ {idx: 17, exp: ne, fn: ne_254_int64},
+ {idx: 18, exp: lt, fn: lt_255_int64},
+ {idx: 18, exp: le, fn: le_255_int64},
+ {idx: 18, exp: gt, fn: gt_255_int64},
+ {idx: 18, exp: ge, fn: ge_255_int64},
+ {idx: 18, exp: eq, fn: eq_255_int64},
+ {idx: 18, exp: ne, fn: ne_255_int64},
+ {idx: 19, exp: lt, fn: lt_256_int64},
+ {idx: 19, exp: le, fn: le_256_int64},
+ {idx: 19, exp: gt, fn: gt_256_int64},
+ {idx: 19, exp: ge, fn: ge_256_int64},
+ {idx: 19, exp: eq, fn: eq_256_int64},
+ {idx: 19, exp: ne, fn: ne_256_int64},
+ {idx: 20, exp: lt, fn: lt_32766_int64},
+ {idx: 20, exp: le, fn: le_32766_int64},
+ {idx: 20, exp: gt, fn: gt_32766_int64},
+ {idx: 20, exp: ge, fn: ge_32766_int64},
+ {idx: 20, exp: eq, fn: eq_32766_int64},
+ {idx: 20, exp: ne, fn: ne_32766_int64},
+ {idx: 21, exp: lt, fn: lt_32767_int64},
+ {idx: 21, exp: le, fn: le_32767_int64},
+ {idx: 21, exp: gt, fn: gt_32767_int64},
+ {idx: 21, exp: ge, fn: ge_32767_int64},
+ {idx: 21, exp: eq, fn: eq_32767_int64},
+ {idx: 21, exp: ne, fn: ne_32767_int64},
+ {idx: 22, exp: lt, fn: lt_32768_int64},
+ {idx: 22, exp: le, fn: le_32768_int64},
+ {idx: 22, exp: gt, fn: gt_32768_int64},
+ {idx: 22, exp: ge, fn: ge_32768_int64},
+ {idx: 22, exp: eq, fn: eq_32768_int64},
+ {idx: 22, exp: ne, fn: ne_32768_int64},
+ {idx: 23, exp: lt, fn: lt_65534_int64},
+ {idx: 23, exp: le, fn: le_65534_int64},
+ {idx: 23, exp: gt, fn: gt_65534_int64},
+ {idx: 23, exp: ge, fn: ge_65534_int64},
+ {idx: 23, exp: eq, fn: eq_65534_int64},
+ {idx: 23, exp: ne, fn: ne_65534_int64},
+ {idx: 24, exp: lt, fn: lt_65535_int64},
+ {idx: 24, exp: le, fn: le_65535_int64},
+ {idx: 24, exp: gt, fn: gt_65535_int64},
+ {idx: 24, exp: ge, fn: ge_65535_int64},
+ {idx: 24, exp: eq, fn: eq_65535_int64},
+ {idx: 24, exp: ne, fn: ne_65535_int64},
+ {idx: 25, exp: lt, fn: lt_65536_int64},
+ {idx: 25, exp: le, fn: le_65536_int64},
+ {idx: 25, exp: gt, fn: gt_65536_int64},
+ {idx: 25, exp: ge, fn: ge_65536_int64},
+ {idx: 25, exp: eq, fn: eq_65536_int64},
+ {idx: 25, exp: ne, fn: ne_65536_int64},
+ {idx: 26, exp: lt, fn: lt_2147483646_int64},
+ {idx: 26, exp: le, fn: le_2147483646_int64},
+ {idx: 26, exp: gt, fn: gt_2147483646_int64},
+ {idx: 26, exp: ge, fn: ge_2147483646_int64},
+ {idx: 26, exp: eq, fn: eq_2147483646_int64},
+ {idx: 26, exp: ne, fn: ne_2147483646_int64},
+ {idx: 27, exp: lt, fn: lt_2147483647_int64},
+ {idx: 27, exp: le, fn: le_2147483647_int64},
+ {idx: 27, exp: gt, fn: gt_2147483647_int64},
+ {idx: 27, exp: ge, fn: ge_2147483647_int64},
+ {idx: 27, exp: eq, fn: eq_2147483647_int64},
+ {idx: 27, exp: ne, fn: ne_2147483647_int64},
+ {idx: 28, exp: lt, fn: lt_2147483648_int64},
+ {idx: 28, exp: le, fn: le_2147483648_int64},
+ {idx: 28, exp: gt, fn: gt_2147483648_int64},
+ {idx: 28, exp: ge, fn: ge_2147483648_int64},
+ {idx: 28, exp: eq, fn: eq_2147483648_int64},
+ {idx: 28, exp: ne, fn: ne_2147483648_int64},
+ {idx: 29, exp: lt, fn: lt_4278190080_int64},
+ {idx: 29, exp: le, fn: le_4278190080_int64},
+ {idx: 29, exp: gt, fn: gt_4278190080_int64},
+ {idx: 29, exp: ge, fn: ge_4278190080_int64},
+ {idx: 29, exp: eq, fn: eq_4278190080_int64},
+ {idx: 29, exp: ne, fn: ne_4278190080_int64},
+ {idx: 30, exp: lt, fn: lt_4294967294_int64},
+ {idx: 30, exp: le, fn: le_4294967294_int64},
+ {idx: 30, exp: gt, fn: gt_4294967294_int64},
+ {idx: 30, exp: ge, fn: ge_4294967294_int64},
+ {idx: 30, exp: eq, fn: eq_4294967294_int64},
+ {idx: 30, exp: ne, fn: ne_4294967294_int64},
+ {idx: 31, exp: lt, fn: lt_4294967295_int64},
+ {idx: 31, exp: le, fn: le_4294967295_int64},
+ {idx: 31, exp: gt, fn: gt_4294967295_int64},
+ {idx: 31, exp: ge, fn: ge_4294967295_int64},
+ {idx: 31, exp: eq, fn: eq_4294967295_int64},
+ {idx: 31, exp: ne, fn: ne_4294967295_int64},
+ {idx: 32, exp: lt, fn: lt_4294967296_int64},
+ {idx: 32, exp: le, fn: le_4294967296_int64},
+ {idx: 32, exp: gt, fn: gt_4294967296_int64},
+ {idx: 32, exp: ge, fn: ge_4294967296_int64},
+ {idx: 32, exp: eq, fn: eq_4294967296_int64},
+ {idx: 32, exp: ne, fn: ne_4294967296_int64},
+ {idx: 33, exp: lt, fn: lt_1095216660480_int64},
+ {idx: 33, exp: le, fn: le_1095216660480_int64},
+ {idx: 33, exp: gt, fn: gt_1095216660480_int64},
+ {idx: 33, exp: ge, fn: ge_1095216660480_int64},
+ {idx: 33, exp: eq, fn: eq_1095216660480_int64},
+ {idx: 33, exp: ne, fn: ne_1095216660480_int64},
+ {idx: 34, exp: lt, fn: lt_9223372036854775806_int64},
+ {idx: 34, exp: le, fn: le_9223372036854775806_int64},
+ {idx: 34, exp: gt, fn: gt_9223372036854775806_int64},
+ {idx: 34, exp: ge, fn: ge_9223372036854775806_int64},
+ {idx: 34, exp: eq, fn: eq_9223372036854775806_int64},
+ {idx: 34, exp: ne, fn: ne_9223372036854775806_int64},
+ {idx: 35, exp: lt, fn: lt_9223372036854775807_int64},
+ {idx: 35, exp: le, fn: le_9223372036854775807_int64},
+ {idx: 35, exp: gt, fn: gt_9223372036854775807_int64},
+ {idx: 35, exp: ge, fn: ge_9223372036854775807_int64},
+ {idx: 35, exp: eq, fn: eq_9223372036854775807_int64},
+ {idx: 35, exp: ne, fn: ne_9223372036854775807_int64},
+}
+
+// int32 tests
+var int32_vals = []int32{
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+}
+
+func lt_neg2147483648_int32(x int32) bool { return x < -2147483648 }
+func le_neg2147483648_int32(x int32) bool { return x <= -2147483648 }
+func gt_neg2147483648_int32(x int32) bool { return x > -2147483648 }
+func ge_neg2147483648_int32(x int32) bool { return x >= -2147483648 }
+func eq_neg2147483648_int32(x int32) bool { return x == -2147483648 }
+func ne_neg2147483648_int32(x int32) bool { return x != -2147483648 }
+func lt_neg2147483647_int32(x int32) bool { return x < -2147483647 }
+func le_neg2147483647_int32(x int32) bool { return x <= -2147483647 }
+func gt_neg2147483647_int32(x int32) bool { return x > -2147483647 }
+func ge_neg2147483647_int32(x int32) bool { return x >= -2147483647 }
+func eq_neg2147483647_int32(x int32) bool { return x == -2147483647 }
+func ne_neg2147483647_int32(x int32) bool { return x != -2147483647 }
+func lt_neg32769_int32(x int32) bool { return x < -32769 }
+func le_neg32769_int32(x int32) bool { return x <= -32769 }
+func gt_neg32769_int32(x int32) bool { return x > -32769 }
+func ge_neg32769_int32(x int32) bool { return x >= -32769 }
+func eq_neg32769_int32(x int32) bool { return x == -32769 }
+func ne_neg32769_int32(x int32) bool { return x != -32769 }
+func lt_neg32768_int32(x int32) bool { return x < -32768 }
+func le_neg32768_int32(x int32) bool { return x <= -32768 }
+func gt_neg32768_int32(x int32) bool { return x > -32768 }
+func ge_neg32768_int32(x int32) bool { return x >= -32768 }
+func eq_neg32768_int32(x int32) bool { return x == -32768 }
+func ne_neg32768_int32(x int32) bool { return x != -32768 }
+func lt_neg32767_int32(x int32) bool { return x < -32767 }
+func le_neg32767_int32(x int32) bool { return x <= -32767 }
+func gt_neg32767_int32(x int32) bool { return x > -32767 }
+func ge_neg32767_int32(x int32) bool { return x >= -32767 }
+func eq_neg32767_int32(x int32) bool { return x == -32767 }
+func ne_neg32767_int32(x int32) bool { return x != -32767 }
+func lt_neg129_int32(x int32) bool { return x < -129 }
+func le_neg129_int32(x int32) bool { return x <= -129 }
+func gt_neg129_int32(x int32) bool { return x > -129 }
+func ge_neg129_int32(x int32) bool { return x >= -129 }
+func eq_neg129_int32(x int32) bool { return x == -129 }
+func ne_neg129_int32(x int32) bool { return x != -129 }
+func lt_neg128_int32(x int32) bool { return x < -128 }
+func le_neg128_int32(x int32) bool { return x <= -128 }
+func gt_neg128_int32(x int32) bool { return x > -128 }
+func ge_neg128_int32(x int32) bool { return x >= -128 }
+func eq_neg128_int32(x int32) bool { return x == -128 }
+func ne_neg128_int32(x int32) bool { return x != -128 }
+func lt_neg127_int32(x int32) bool { return x < -127 }
+func le_neg127_int32(x int32) bool { return x <= -127 }
+func gt_neg127_int32(x int32) bool { return x > -127 }
+func ge_neg127_int32(x int32) bool { return x >= -127 }
+func eq_neg127_int32(x int32) bool { return x == -127 }
+func ne_neg127_int32(x int32) bool { return x != -127 }
+func lt_neg1_int32(x int32) bool { return x < -1 }
+func le_neg1_int32(x int32) bool { return x <= -1 }
+func gt_neg1_int32(x int32) bool { return x > -1 }
+func ge_neg1_int32(x int32) bool { return x >= -1 }
+func eq_neg1_int32(x int32) bool { return x == -1 }
+func ne_neg1_int32(x int32) bool { return x != -1 }
+func lt_0_int32(x int32) bool { return x < 0 }
+func le_0_int32(x int32) bool { return x <= 0 }
+func gt_0_int32(x int32) bool { return x > 0 }
+func ge_0_int32(x int32) bool { return x >= 0 }
+func eq_0_int32(x int32) bool { return x == 0 }
+func ne_0_int32(x int32) bool { return x != 0 }
+func lt_1_int32(x int32) bool { return x < 1 }
+func le_1_int32(x int32) bool { return x <= 1 }
+func gt_1_int32(x int32) bool { return x > 1 }
+func ge_1_int32(x int32) bool { return x >= 1 }
+func eq_1_int32(x int32) bool { return x == 1 }
+func ne_1_int32(x int32) bool { return x != 1 }
+func lt_126_int32(x int32) bool { return x < 126 }
+func le_126_int32(x int32) bool { return x <= 126 }
+func gt_126_int32(x int32) bool { return x > 126 }
+func ge_126_int32(x int32) bool { return x >= 126 }
+func eq_126_int32(x int32) bool { return x == 126 }
+func ne_126_int32(x int32) bool { return x != 126 }
+func lt_127_int32(x int32) bool { return x < 127 }
+func le_127_int32(x int32) bool { return x <= 127 }
+func gt_127_int32(x int32) bool { return x > 127 }
+func ge_127_int32(x int32) bool { return x >= 127 }
+func eq_127_int32(x int32) bool { return x == 127 }
+func ne_127_int32(x int32) bool { return x != 127 }
+func lt_128_int32(x int32) bool { return x < 128 }
+func le_128_int32(x int32) bool { return x <= 128 }
+func gt_128_int32(x int32) bool { return x > 128 }
+func ge_128_int32(x int32) bool { return x >= 128 }
+func eq_128_int32(x int32) bool { return x == 128 }
+func ne_128_int32(x int32) bool { return x != 128 }
+func lt_254_int32(x int32) bool { return x < 254 }
+func le_254_int32(x int32) bool { return x <= 254 }
+func gt_254_int32(x int32) bool { return x > 254 }
+func ge_254_int32(x int32) bool { return x >= 254 }
+func eq_254_int32(x int32) bool { return x == 254 }
+func ne_254_int32(x int32) bool { return x != 254 }
+func lt_255_int32(x int32) bool { return x < 255 }
+func le_255_int32(x int32) bool { return x <= 255 }
+func gt_255_int32(x int32) bool { return x > 255 }
+func ge_255_int32(x int32) bool { return x >= 255 }
+func eq_255_int32(x int32) bool { return x == 255 }
+func ne_255_int32(x int32) bool { return x != 255 }
+func lt_256_int32(x int32) bool { return x < 256 }
+func le_256_int32(x int32) bool { return x <= 256 }
+func gt_256_int32(x int32) bool { return x > 256 }
+func ge_256_int32(x int32) bool { return x >= 256 }
+func eq_256_int32(x int32) bool { return x == 256 }
+func ne_256_int32(x int32) bool { return x != 256 }
+func lt_32766_int32(x int32) bool { return x < 32766 }
+func le_32766_int32(x int32) bool { return x <= 32766 }
+func gt_32766_int32(x int32) bool { return x > 32766 }
+func ge_32766_int32(x int32) bool { return x >= 32766 }
+func eq_32766_int32(x int32) bool { return x == 32766 }
+func ne_32766_int32(x int32) bool { return x != 32766 }
+func lt_32767_int32(x int32) bool { return x < 32767 }
+func le_32767_int32(x int32) bool { return x <= 32767 }
+func gt_32767_int32(x int32) bool { return x > 32767 }
+func ge_32767_int32(x int32) bool { return x >= 32767 }
+func eq_32767_int32(x int32) bool { return x == 32767 }
+func ne_32767_int32(x int32) bool { return x != 32767 }
+func lt_32768_int32(x int32) bool { return x < 32768 }
+func le_32768_int32(x int32) bool { return x <= 32768 }
+func gt_32768_int32(x int32) bool { return x > 32768 }
+func ge_32768_int32(x int32) bool { return x >= 32768 }
+func eq_32768_int32(x int32) bool { return x == 32768 }
+func ne_32768_int32(x int32) bool { return x != 32768 }
+func lt_65534_int32(x int32) bool { return x < 65534 }
+func le_65534_int32(x int32) bool { return x <= 65534 }
+func gt_65534_int32(x int32) bool { return x > 65534 }
+func ge_65534_int32(x int32) bool { return x >= 65534 }
+func eq_65534_int32(x int32) bool { return x == 65534 }
+func ne_65534_int32(x int32) bool { return x != 65534 }
+func lt_65535_int32(x int32) bool { return x < 65535 }
+func le_65535_int32(x int32) bool { return x <= 65535 }
+func gt_65535_int32(x int32) bool { return x > 65535 }
+func ge_65535_int32(x int32) bool { return x >= 65535 }
+func eq_65535_int32(x int32) bool { return x == 65535 }
+func ne_65535_int32(x int32) bool { return x != 65535 }
+func lt_65536_int32(x int32) bool { return x < 65536 }
+func le_65536_int32(x int32) bool { return x <= 65536 }
+func gt_65536_int32(x int32) bool { return x > 65536 }
+func ge_65536_int32(x int32) bool { return x >= 65536 }
+func eq_65536_int32(x int32) bool { return x == 65536 }
+func ne_65536_int32(x int32) bool { return x != 65536 }
+func lt_2147483646_int32(x int32) bool { return x < 2147483646 }
+func le_2147483646_int32(x int32) bool { return x <= 2147483646 }
+func gt_2147483646_int32(x int32) bool { return x > 2147483646 }
+func ge_2147483646_int32(x int32) bool { return x >= 2147483646 }
+func eq_2147483646_int32(x int32) bool { return x == 2147483646 }
+func ne_2147483646_int32(x int32) bool { return x != 2147483646 }
+func lt_2147483647_int32(x int32) bool { return x < 2147483647 }
+func le_2147483647_int32(x int32) bool { return x <= 2147483647 }
+func gt_2147483647_int32(x int32) bool { return x > 2147483647 }
+func ge_2147483647_int32(x int32) bool { return x >= 2147483647 }
+func eq_2147483647_int32(x int32) bool { return x == 2147483647 }
+func ne_2147483647_int32(x int32) bool { return x != 2147483647 }
+
+var int32_tests = []struct {
+ idx int // index into int32_vals of the constant used
+ exp result // expected results
+ fn func(int32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg2147483648_int32},
+ {idx: 0, exp: le, fn: le_neg2147483648_int32},
+ {idx: 0, exp: gt, fn: gt_neg2147483648_int32},
+ {idx: 0, exp: ge, fn: ge_neg2147483648_int32},
+ {idx: 0, exp: eq, fn: eq_neg2147483648_int32},
+ {idx: 0, exp: ne, fn: ne_neg2147483648_int32},
+ {idx: 1, exp: lt, fn: lt_neg2147483647_int32},
+ {idx: 1, exp: le, fn: le_neg2147483647_int32},
+ {idx: 1, exp: gt, fn: gt_neg2147483647_int32},
+ {idx: 1, exp: ge, fn: ge_neg2147483647_int32},
+ {idx: 1, exp: eq, fn: eq_neg2147483647_int32},
+ {idx: 1, exp: ne, fn: ne_neg2147483647_int32},
+ {idx: 2, exp: lt, fn: lt_neg32769_int32},
+ {idx: 2, exp: le, fn: le_neg32769_int32},
+ {idx: 2, exp: gt, fn: gt_neg32769_int32},
+ {idx: 2, exp: ge, fn: ge_neg32769_int32},
+ {idx: 2, exp: eq, fn: eq_neg32769_int32},
+ {idx: 2, exp: ne, fn: ne_neg32769_int32},
+ {idx: 3, exp: lt, fn: lt_neg32768_int32},
+ {idx: 3, exp: le, fn: le_neg32768_int32},
+ {idx: 3, exp: gt, fn: gt_neg32768_int32},
+ {idx: 3, exp: ge, fn: ge_neg32768_int32},
+ {idx: 3, exp: eq, fn: eq_neg32768_int32},
+ {idx: 3, exp: ne, fn: ne_neg32768_int32},
+ {idx: 4, exp: lt, fn: lt_neg32767_int32},
+ {idx: 4, exp: le, fn: le_neg32767_int32},
+ {idx: 4, exp: gt, fn: gt_neg32767_int32},
+ {idx: 4, exp: ge, fn: ge_neg32767_int32},
+ {idx: 4, exp: eq, fn: eq_neg32767_int32},
+ {idx: 4, exp: ne, fn: ne_neg32767_int32},
+ {idx: 5, exp: lt, fn: lt_neg129_int32},
+ {idx: 5, exp: le, fn: le_neg129_int32},
+ {idx: 5, exp: gt, fn: gt_neg129_int32},
+ {idx: 5, exp: ge, fn: ge_neg129_int32},
+ {idx: 5, exp: eq, fn: eq_neg129_int32},
+ {idx: 5, exp: ne, fn: ne_neg129_int32},
+ {idx: 6, exp: lt, fn: lt_neg128_int32},
+ {idx: 6, exp: le, fn: le_neg128_int32},
+ {idx: 6, exp: gt, fn: gt_neg128_int32},
+ {idx: 6, exp: ge, fn: ge_neg128_int32},
+ {idx: 6, exp: eq, fn: eq_neg128_int32},
+ {idx: 6, exp: ne, fn: ne_neg128_int32},
+ {idx: 7, exp: lt, fn: lt_neg127_int32},
+ {idx: 7, exp: le, fn: le_neg127_int32},
+ {idx: 7, exp: gt, fn: gt_neg127_int32},
+ {idx: 7, exp: ge, fn: ge_neg127_int32},
+ {idx: 7, exp: eq, fn: eq_neg127_int32},
+ {idx: 7, exp: ne, fn: ne_neg127_int32},
+ {idx: 8, exp: lt, fn: lt_neg1_int32},
+ {idx: 8, exp: le, fn: le_neg1_int32},
+ {idx: 8, exp: gt, fn: gt_neg1_int32},
+ {idx: 8, exp: ge, fn: ge_neg1_int32},
+ {idx: 8, exp: eq, fn: eq_neg1_int32},
+ {idx: 8, exp: ne, fn: ne_neg1_int32},
+ {idx: 9, exp: lt, fn: lt_0_int32},
+ {idx: 9, exp: le, fn: le_0_int32},
+ {idx: 9, exp: gt, fn: gt_0_int32},
+ {idx: 9, exp: ge, fn: ge_0_int32},
+ {idx: 9, exp: eq, fn: eq_0_int32},
+ {idx: 9, exp: ne, fn: ne_0_int32},
+ {idx: 10, exp: lt, fn: lt_1_int32},
+ {idx: 10, exp: le, fn: le_1_int32},
+ {idx: 10, exp: gt, fn: gt_1_int32},
+ {idx: 10, exp: ge, fn: ge_1_int32},
+ {idx: 10, exp: eq, fn: eq_1_int32},
+ {idx: 10, exp: ne, fn: ne_1_int32},
+ {idx: 11, exp: lt, fn: lt_126_int32},
+ {idx: 11, exp: le, fn: le_126_int32},
+ {idx: 11, exp: gt, fn: gt_126_int32},
+ {idx: 11, exp: ge, fn: ge_126_int32},
+ {idx: 11, exp: eq, fn: eq_126_int32},
+ {idx: 11, exp: ne, fn: ne_126_int32},
+ {idx: 12, exp: lt, fn: lt_127_int32},
+ {idx: 12, exp: le, fn: le_127_int32},
+ {idx: 12, exp: gt, fn: gt_127_int32},
+ {idx: 12, exp: ge, fn: ge_127_int32},
+ {idx: 12, exp: eq, fn: eq_127_int32},
+ {idx: 12, exp: ne, fn: ne_127_int32},
+ {idx: 13, exp: lt, fn: lt_128_int32},
+ {idx: 13, exp: le, fn: le_128_int32},
+ {idx: 13, exp: gt, fn: gt_128_int32},
+ {idx: 13, exp: ge, fn: ge_128_int32},
+ {idx: 13, exp: eq, fn: eq_128_int32},
+ {idx: 13, exp: ne, fn: ne_128_int32},
+ {idx: 14, exp: lt, fn: lt_254_int32},
+ {idx: 14, exp: le, fn: le_254_int32},
+ {idx: 14, exp: gt, fn: gt_254_int32},
+ {idx: 14, exp: ge, fn: ge_254_int32},
+ {idx: 14, exp: eq, fn: eq_254_int32},
+ {idx: 14, exp: ne, fn: ne_254_int32},
+ {idx: 15, exp: lt, fn: lt_255_int32},
+ {idx: 15, exp: le, fn: le_255_int32},
+ {idx: 15, exp: gt, fn: gt_255_int32},
+ {idx: 15, exp: ge, fn: ge_255_int32},
+ {idx: 15, exp: eq, fn: eq_255_int32},
+ {idx: 15, exp: ne, fn: ne_255_int32},
+ {idx: 16, exp: lt, fn: lt_256_int32},
+ {idx: 16, exp: le, fn: le_256_int32},
+ {idx: 16, exp: gt, fn: gt_256_int32},
+ {idx: 16, exp: ge, fn: ge_256_int32},
+ {idx: 16, exp: eq, fn: eq_256_int32},
+ {idx: 16, exp: ne, fn: ne_256_int32},
+ {idx: 17, exp: lt, fn: lt_32766_int32},
+ {idx: 17, exp: le, fn: le_32766_int32},
+ {idx: 17, exp: gt, fn: gt_32766_int32},
+ {idx: 17, exp: ge, fn: ge_32766_int32},
+ {idx: 17, exp: eq, fn: eq_32766_int32},
+ {idx: 17, exp: ne, fn: ne_32766_int32},
+ {idx: 18, exp: lt, fn: lt_32767_int32},
+ {idx: 18, exp: le, fn: le_32767_int32},
+ {idx: 18, exp: gt, fn: gt_32767_int32},
+ {idx: 18, exp: ge, fn: ge_32767_int32},
+ {idx: 18, exp: eq, fn: eq_32767_int32},
+ {idx: 18, exp: ne, fn: ne_32767_int32},
+ {idx: 19, exp: lt, fn: lt_32768_int32},
+ {idx: 19, exp: le, fn: le_32768_int32},
+ {idx: 19, exp: gt, fn: gt_32768_int32},
+ {idx: 19, exp: ge, fn: ge_32768_int32},
+ {idx: 19, exp: eq, fn: eq_32768_int32},
+ {idx: 19, exp: ne, fn: ne_32768_int32},
+ {idx: 20, exp: lt, fn: lt_65534_int32},
+ {idx: 20, exp: le, fn: le_65534_int32},
+ {idx: 20, exp: gt, fn: gt_65534_int32},
+ {idx: 20, exp: ge, fn: ge_65534_int32},
+ {idx: 20, exp: eq, fn: eq_65534_int32},
+ {idx: 20, exp: ne, fn: ne_65534_int32},
+ {idx: 21, exp: lt, fn: lt_65535_int32},
+ {idx: 21, exp: le, fn: le_65535_int32},
+ {idx: 21, exp: gt, fn: gt_65535_int32},
+ {idx: 21, exp: ge, fn: ge_65535_int32},
+ {idx: 21, exp: eq, fn: eq_65535_int32},
+ {idx: 21, exp: ne, fn: ne_65535_int32},
+ {idx: 22, exp: lt, fn: lt_65536_int32},
+ {idx: 22, exp: le, fn: le_65536_int32},
+ {idx: 22, exp: gt, fn: gt_65536_int32},
+ {idx: 22, exp: ge, fn: ge_65536_int32},
+ {idx: 22, exp: eq, fn: eq_65536_int32},
+ {idx: 22, exp: ne, fn: ne_65536_int32},
+ {idx: 23, exp: lt, fn: lt_2147483646_int32},
+ {idx: 23, exp: le, fn: le_2147483646_int32},
+ {idx: 23, exp: gt, fn: gt_2147483646_int32},
+ {idx: 23, exp: ge, fn: ge_2147483646_int32},
+ {idx: 23, exp: eq, fn: eq_2147483646_int32},
+ {idx: 23, exp: ne, fn: ne_2147483646_int32},
+ {idx: 24, exp: lt, fn: lt_2147483647_int32},
+ {idx: 24, exp: le, fn: le_2147483647_int32},
+ {idx: 24, exp: gt, fn: gt_2147483647_int32},
+ {idx: 24, exp: ge, fn: ge_2147483647_int32},
+ {idx: 24, exp: eq, fn: eq_2147483647_int32},
+ {idx: 24, exp: ne, fn: ne_2147483647_int32},
+}
+
+// int16 tests
+var int16_vals = []int16{
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+}
+
+func lt_neg32768_int16(x int16) bool { return x < -32768 }
+func le_neg32768_int16(x int16) bool { return x <= -32768 }
+func gt_neg32768_int16(x int16) bool { return x > -32768 }
+func ge_neg32768_int16(x int16) bool { return x >= -32768 }
+func eq_neg32768_int16(x int16) bool { return x == -32768 }
+func ne_neg32768_int16(x int16) bool { return x != -32768 }
+func lt_neg32767_int16(x int16) bool { return x < -32767 }
+func le_neg32767_int16(x int16) bool { return x <= -32767 }
+func gt_neg32767_int16(x int16) bool { return x > -32767 }
+func ge_neg32767_int16(x int16) bool { return x >= -32767 }
+func eq_neg32767_int16(x int16) bool { return x == -32767 }
+func ne_neg32767_int16(x int16) bool { return x != -32767 }
+func lt_neg129_int16(x int16) bool { return x < -129 }
+func le_neg129_int16(x int16) bool { return x <= -129 }
+func gt_neg129_int16(x int16) bool { return x > -129 }
+func ge_neg129_int16(x int16) bool { return x >= -129 }
+func eq_neg129_int16(x int16) bool { return x == -129 }
+func ne_neg129_int16(x int16) bool { return x != -129 }
+func lt_neg128_int16(x int16) bool { return x < -128 }
+func le_neg128_int16(x int16) bool { return x <= -128 }
+func gt_neg128_int16(x int16) bool { return x > -128 }
+func ge_neg128_int16(x int16) bool { return x >= -128 }
+func eq_neg128_int16(x int16) bool { return x == -128 }
+func ne_neg128_int16(x int16) bool { return x != -128 }
+func lt_neg127_int16(x int16) bool { return x < -127 }
+func le_neg127_int16(x int16) bool { return x <= -127 }
+func gt_neg127_int16(x int16) bool { return x > -127 }
+func ge_neg127_int16(x int16) bool { return x >= -127 }
+func eq_neg127_int16(x int16) bool { return x == -127 }
+func ne_neg127_int16(x int16) bool { return x != -127 }
+func lt_neg1_int16(x int16) bool { return x < -1 }
+func le_neg1_int16(x int16) bool { return x <= -1 }
+func gt_neg1_int16(x int16) bool { return x > -1 }
+func ge_neg1_int16(x int16) bool { return x >= -1 }
+func eq_neg1_int16(x int16) bool { return x == -1 }
+func ne_neg1_int16(x int16) bool { return x != -1 }
+func lt_0_int16(x int16) bool { return x < 0 }
+func le_0_int16(x int16) bool { return x <= 0 }
+func gt_0_int16(x int16) bool { return x > 0 }
+func ge_0_int16(x int16) bool { return x >= 0 }
+func eq_0_int16(x int16) bool { return x == 0 }
+func ne_0_int16(x int16) bool { return x != 0 }
+func lt_1_int16(x int16) bool { return x < 1 }
+func le_1_int16(x int16) bool { return x <= 1 }
+func gt_1_int16(x int16) bool { return x > 1 }
+func ge_1_int16(x int16) bool { return x >= 1 }
+func eq_1_int16(x int16) bool { return x == 1 }
+func ne_1_int16(x int16) bool { return x != 1 }
+func lt_126_int16(x int16) bool { return x < 126 }
+func le_126_int16(x int16) bool { return x <= 126 }
+func gt_126_int16(x int16) bool { return x > 126 }
+func ge_126_int16(x int16) bool { return x >= 126 }
+func eq_126_int16(x int16) bool { return x == 126 }
+func ne_126_int16(x int16) bool { return x != 126 }
+func lt_127_int16(x int16) bool { return x < 127 }
+func le_127_int16(x int16) bool { return x <= 127 }
+func gt_127_int16(x int16) bool { return x > 127 }
+func ge_127_int16(x int16) bool { return x >= 127 }
+func eq_127_int16(x int16) bool { return x == 127 }
+func ne_127_int16(x int16) bool { return x != 127 }
+func lt_128_int16(x int16) bool { return x < 128 }
+func le_128_int16(x int16) bool { return x <= 128 }
+func gt_128_int16(x int16) bool { return x > 128 }
+func ge_128_int16(x int16) bool { return x >= 128 }
+func eq_128_int16(x int16) bool { return x == 128 }
+func ne_128_int16(x int16) bool { return x != 128 }
+func lt_254_int16(x int16) bool { return x < 254 }
+func le_254_int16(x int16) bool { return x <= 254 }
+func gt_254_int16(x int16) bool { return x > 254 }
+func ge_254_int16(x int16) bool { return x >= 254 }
+func eq_254_int16(x int16) bool { return x == 254 }
+func ne_254_int16(x int16) bool { return x != 254 }
+func lt_255_int16(x int16) bool { return x < 255 }
+func le_255_int16(x int16) bool { return x <= 255 }
+func gt_255_int16(x int16) bool { return x > 255 }
+func ge_255_int16(x int16) bool { return x >= 255 }
+func eq_255_int16(x int16) bool { return x == 255 }
+func ne_255_int16(x int16) bool { return x != 255 }
+func lt_256_int16(x int16) bool { return x < 256 }
+func le_256_int16(x int16) bool { return x <= 256 }
+func gt_256_int16(x int16) bool { return x > 256 }
+func ge_256_int16(x int16) bool { return x >= 256 }
+func eq_256_int16(x int16) bool { return x == 256 }
+func ne_256_int16(x int16) bool { return x != 256 }
+func lt_32766_int16(x int16) bool { return x < 32766 }
+func le_32766_int16(x int16) bool { return x <= 32766 }
+func gt_32766_int16(x int16) bool { return x > 32766 }
+func ge_32766_int16(x int16) bool { return x >= 32766 }
+func eq_32766_int16(x int16) bool { return x == 32766 }
+func ne_32766_int16(x int16) bool { return x != 32766 }
+func lt_32767_int16(x int16) bool { return x < 32767 }
+func le_32767_int16(x int16) bool { return x <= 32767 }
+func gt_32767_int16(x int16) bool { return x > 32767 }
+func ge_32767_int16(x int16) bool { return x >= 32767 }
+func eq_32767_int16(x int16) bool { return x == 32767 }
+func ne_32767_int16(x int16) bool { return x != 32767 }
+
+var int16_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg32768_int16},
+ {idx: 0, exp: le, fn: le_neg32768_int16},
+ {idx: 0, exp: gt, fn: gt_neg32768_int16},
+ {idx: 0, exp: ge, fn: ge_neg32768_int16},
+ {idx: 0, exp: eq, fn: eq_neg32768_int16},
+ {idx: 0, exp: ne, fn: ne_neg32768_int16},
+ {idx: 1, exp: lt, fn: lt_neg32767_int16},
+ {idx: 1, exp: le, fn: le_neg32767_int16},
+ {idx: 1, exp: gt, fn: gt_neg32767_int16},
+ {idx: 1, exp: ge, fn: ge_neg32767_int16},
+ {idx: 1, exp: eq, fn: eq_neg32767_int16},
+ {idx: 1, exp: ne, fn: ne_neg32767_int16},
+ {idx: 2, exp: lt, fn: lt_neg129_int16},
+ {idx: 2, exp: le, fn: le_neg129_int16},
+ {idx: 2, exp: gt, fn: gt_neg129_int16},
+ {idx: 2, exp: ge, fn: ge_neg129_int16},
+ {idx: 2, exp: eq, fn: eq_neg129_int16},
+ {idx: 2, exp: ne, fn: ne_neg129_int16},
+ {idx: 3, exp: lt, fn: lt_neg128_int16},
+ {idx: 3, exp: le, fn: le_neg128_int16},
+ {idx: 3, exp: gt, fn: gt_neg128_int16},
+ {idx: 3, exp: ge, fn: ge_neg128_int16},
+ {idx: 3, exp: eq, fn: eq_neg128_int16},
+ {idx: 3, exp: ne, fn: ne_neg128_int16},
+ {idx: 4, exp: lt, fn: lt_neg127_int16},
+ {idx: 4, exp: le, fn: le_neg127_int16},
+ {idx: 4, exp: gt, fn: gt_neg127_int16},
+ {idx: 4, exp: ge, fn: ge_neg127_int16},
+ {idx: 4, exp: eq, fn: eq_neg127_int16},
+ {idx: 4, exp: ne, fn: ne_neg127_int16},
+ {idx: 5, exp: lt, fn: lt_neg1_int16},
+ {idx: 5, exp: le, fn: le_neg1_int16},
+ {idx: 5, exp: gt, fn: gt_neg1_int16},
+ {idx: 5, exp: ge, fn: ge_neg1_int16},
+ {idx: 5, exp: eq, fn: eq_neg1_int16},
+ {idx: 5, exp: ne, fn: ne_neg1_int16},
+ {idx: 6, exp: lt, fn: lt_0_int16},
+ {idx: 6, exp: le, fn: le_0_int16},
+ {idx: 6, exp: gt, fn: gt_0_int16},
+ {idx: 6, exp: ge, fn: ge_0_int16},
+ {idx: 6, exp: eq, fn: eq_0_int16},
+ {idx: 6, exp: ne, fn: ne_0_int16},
+ {idx: 7, exp: lt, fn: lt_1_int16},
+ {idx: 7, exp: le, fn: le_1_int16},
+ {idx: 7, exp: gt, fn: gt_1_int16},
+ {idx: 7, exp: ge, fn: ge_1_int16},
+ {idx: 7, exp: eq, fn: eq_1_int16},
+ {idx: 7, exp: ne, fn: ne_1_int16},
+ {idx: 8, exp: lt, fn: lt_126_int16},
+ {idx: 8, exp: le, fn: le_126_int16},
+ {idx: 8, exp: gt, fn: gt_126_int16},
+ {idx: 8, exp: ge, fn: ge_126_int16},
+ {idx: 8, exp: eq, fn: eq_126_int16},
+ {idx: 8, exp: ne, fn: ne_126_int16},
+ {idx: 9, exp: lt, fn: lt_127_int16},
+ {idx: 9, exp: le, fn: le_127_int16},
+ {idx: 9, exp: gt, fn: gt_127_int16},
+ {idx: 9, exp: ge, fn: ge_127_int16},
+ {idx: 9, exp: eq, fn: eq_127_int16},
+ {idx: 9, exp: ne, fn: ne_127_int16},
+ {idx: 10, exp: lt, fn: lt_128_int16},
+ {idx: 10, exp: le, fn: le_128_int16},
+ {idx: 10, exp: gt, fn: gt_128_int16},
+ {idx: 10, exp: ge, fn: ge_128_int16},
+ {idx: 10, exp: eq, fn: eq_128_int16},
+ {idx: 10, exp: ne, fn: ne_128_int16},
+ {idx: 11, exp: lt, fn: lt_254_int16},
+ {idx: 11, exp: le, fn: le_254_int16},
+ {idx: 11, exp: gt, fn: gt_254_int16},
+ {idx: 11, exp: ge, fn: ge_254_int16},
+ {idx: 11, exp: eq, fn: eq_254_int16},
+ {idx: 11, exp: ne, fn: ne_254_int16},
+ {idx: 12, exp: lt, fn: lt_255_int16},
+ {idx: 12, exp: le, fn: le_255_int16},
+ {idx: 12, exp: gt, fn: gt_255_int16},
+ {idx: 12, exp: ge, fn: ge_255_int16},
+ {idx: 12, exp: eq, fn: eq_255_int16},
+ {idx: 12, exp: ne, fn: ne_255_int16},
+ {idx: 13, exp: lt, fn: lt_256_int16},
+ {idx: 13, exp: le, fn: le_256_int16},
+ {idx: 13, exp: gt, fn: gt_256_int16},
+ {idx: 13, exp: ge, fn: ge_256_int16},
+ {idx: 13, exp: eq, fn: eq_256_int16},
+ {idx: 13, exp: ne, fn: ne_256_int16},
+ {idx: 14, exp: lt, fn: lt_32766_int16},
+ {idx: 14, exp: le, fn: le_32766_int16},
+ {idx: 14, exp: gt, fn: gt_32766_int16},
+ {idx: 14, exp: ge, fn: ge_32766_int16},
+ {idx: 14, exp: eq, fn: eq_32766_int16},
+ {idx: 14, exp: ne, fn: ne_32766_int16},
+ {idx: 15, exp: lt, fn: lt_32767_int16},
+ {idx: 15, exp: le, fn: le_32767_int16},
+ {idx: 15, exp: gt, fn: gt_32767_int16},
+ {idx: 15, exp: ge, fn: ge_32767_int16},
+ {idx: 15, exp: eq, fn: eq_32767_int16},
+ {idx: 15, exp: ne, fn: ne_32767_int16},
+}
+
+// int8 tests
+var int8_vals = []int8{
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+}
+
+func lt_neg128_int8(x int8) bool { return x < -128 }
+func le_neg128_int8(x int8) bool { return x <= -128 }
+func gt_neg128_int8(x int8) bool { return x > -128 }
+func ge_neg128_int8(x int8) bool { return x >= -128 }
+func eq_neg128_int8(x int8) bool { return x == -128 }
+func ne_neg128_int8(x int8) bool { return x != -128 }
+func lt_neg127_int8(x int8) bool { return x < -127 }
+func le_neg127_int8(x int8) bool { return x <= -127 }
+func gt_neg127_int8(x int8) bool { return x > -127 }
+func ge_neg127_int8(x int8) bool { return x >= -127 }
+func eq_neg127_int8(x int8) bool { return x == -127 }
+func ne_neg127_int8(x int8) bool { return x != -127 }
+func lt_neg1_int8(x int8) bool { return x < -1 }
+func le_neg1_int8(x int8) bool { return x <= -1 }
+func gt_neg1_int8(x int8) bool { return x > -1 }
+func ge_neg1_int8(x int8) bool { return x >= -1 }
+func eq_neg1_int8(x int8) bool { return x == -1 }
+func ne_neg1_int8(x int8) bool { return x != -1 }
+func lt_0_int8(x int8) bool { return x < 0 }
+func le_0_int8(x int8) bool { return x <= 0 }
+func gt_0_int8(x int8) bool { return x > 0 }
+func ge_0_int8(x int8) bool { return x >= 0 }
+func eq_0_int8(x int8) bool { return x == 0 }
+func ne_0_int8(x int8) bool { return x != 0 }
+func lt_1_int8(x int8) bool { return x < 1 }
+func le_1_int8(x int8) bool { return x <= 1 }
+func gt_1_int8(x int8) bool { return x > 1 }
+func ge_1_int8(x int8) bool { return x >= 1 }
+func eq_1_int8(x int8) bool { return x == 1 }
+func ne_1_int8(x int8) bool { return x != 1 }
+func lt_126_int8(x int8) bool { return x < 126 }
+func le_126_int8(x int8) bool { return x <= 126 }
+func gt_126_int8(x int8) bool { return x > 126 }
+func ge_126_int8(x int8) bool { return x >= 126 }
+func eq_126_int8(x int8) bool { return x == 126 }
+func ne_126_int8(x int8) bool { return x != 126 }
+func lt_127_int8(x int8) bool { return x < 127 }
+func le_127_int8(x int8) bool { return x <= 127 }
+func gt_127_int8(x int8) bool { return x > 127 }
+func ge_127_int8(x int8) bool { return x >= 127 }
+func eq_127_int8(x int8) bool { return x == 127 }
+func ne_127_int8(x int8) bool { return x != 127 }
+
+var int8_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg128_int8},
+ {idx: 0, exp: le, fn: le_neg128_int8},
+ {idx: 0, exp: gt, fn: gt_neg128_int8},
+ {idx: 0, exp: ge, fn: ge_neg128_int8},
+ {idx: 0, exp: eq, fn: eq_neg128_int8},
+ {idx: 0, exp: ne, fn: ne_neg128_int8},
+ {idx: 1, exp: lt, fn: lt_neg127_int8},
+ {idx: 1, exp: le, fn: le_neg127_int8},
+ {idx: 1, exp: gt, fn: gt_neg127_int8},
+ {idx: 1, exp: ge, fn: ge_neg127_int8},
+ {idx: 1, exp: eq, fn: eq_neg127_int8},
+ {idx: 1, exp: ne, fn: ne_neg127_int8},
+ {idx: 2, exp: lt, fn: lt_neg1_int8},
+ {idx: 2, exp: le, fn: le_neg1_int8},
+ {idx: 2, exp: gt, fn: gt_neg1_int8},
+ {idx: 2, exp: ge, fn: ge_neg1_int8},
+ {idx: 2, exp: eq, fn: eq_neg1_int8},
+ {idx: 2, exp: ne, fn: ne_neg1_int8},
+ {idx: 3, exp: lt, fn: lt_0_int8},
+ {idx: 3, exp: le, fn: le_0_int8},
+ {idx: 3, exp: gt, fn: gt_0_int8},
+ {idx: 3, exp: ge, fn: ge_0_int8},
+ {idx: 3, exp: eq, fn: eq_0_int8},
+ {idx: 3, exp: ne, fn: ne_0_int8},
+ {idx: 4, exp: lt, fn: lt_1_int8},
+ {idx: 4, exp: le, fn: le_1_int8},
+ {idx: 4, exp: gt, fn: gt_1_int8},
+ {idx: 4, exp: ge, fn: ge_1_int8},
+ {idx: 4, exp: eq, fn: eq_1_int8},
+ {idx: 4, exp: ne, fn: ne_1_int8},
+ {idx: 5, exp: lt, fn: lt_126_int8},
+ {idx: 5, exp: le, fn: le_126_int8},
+ {idx: 5, exp: gt, fn: gt_126_int8},
+ {idx: 5, exp: ge, fn: ge_126_int8},
+ {idx: 5, exp: eq, fn: eq_126_int8},
+ {idx: 5, exp: ne, fn: ne_126_int8},
+ {idx: 6, exp: lt, fn: lt_127_int8},
+ {idx: 6, exp: le, fn: le_127_int8},
+ {idx: 6, exp: gt, fn: gt_127_int8},
+ {idx: 6, exp: ge, fn: ge_127_int8},
+ {idx: 6, exp: eq, fn: eq_127_int8},
+ {idx: 6, exp: ne, fn: ne_127_int8},
+}
+
+// TestComparisonsConst tests results for comparison operations against constants.
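+// Each *_vals slice is sorted in increasing order, so for vals[j] the
+// expected result is exp.l when j < idx (argument below the constant),
+// exp.e when j == idx, and exp.r when j > idx; the loops below pick
+// `want` accordingly.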
+func TestComparisonsConst(t *testing.T) {
+ for i, test := range uint64_tests {
+ for j, x := range uint64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint32_tests {
+ for j, x := range uint32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint16_tests {
+ for j, x := range uint16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint8_tests {
+ for j, x := range uint8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int64_tests {
+ for j, x := range int64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int32_tests {
+ for j, x := range int32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int16_tests {
+ for j, x := range int16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int8_tests {
+ for j, x := range int8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/cmp_test.go b/src/cmd/compile/internal/gc/testdata/cmp_test.go
new file mode 100644
index 0000000..06b58f2
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/cmp_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// cmp_test.go tests compare simplification operations.
+package main
+
+import "testing"
+
+//go:noinline
+func eq_ssa(a int64) bool {
+ return 4+a == 10
+}
+
+//go:noinline
+func neq_ssa(a int64) bool {
+ return 10 != a+4
+}
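+
+// The compiler is expected to simplify comparisons like 4+a == 10 down to
+// a == 6; the //go:noinline directives keep the constant arguments passed
+// from testCmp from folding the whole expression away before that happens.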
+
+func testCmp(t *testing.T) {
+ if wanted, got := true, eq_ssa(6); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, eq_ssa(7); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, neq_ssa(6); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := true, neq_ssa(7); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+}
+
+func TestCmp(t *testing.T) {
+ testCmp(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/compound_test.go b/src/cmd/compile/internal/gc/testdata/compound_test.go
new file mode 100644
index 0000000..4ae464d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/compound_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test compound objects
+
+package main
+
+import (
+ "testing"
+)
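+
+// Each helper below routes one of two compound values (string, complex,
+// slice, interface) through a branch, so the compiler has to carry a
+// multi-word value across the control-flow merge before returning it.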
+
+func string_ssa(a, b string, x bool) string {
+ s := ""
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testString(t *testing.T) {
+ a := "foo"
+ b := "barz"
+ if want, got := a, string_ssa(a, b, true); got != want {
+ t.Errorf("string_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, string_ssa(a, b, false); got != want {
+ t.Errorf("string_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+//go:noinline
+func complex64_ssa(a, b complex64, x bool) complex64 {
+ var c complex64
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+//go:noinline
+func complex128_ssa(a, b complex128, x bool) complex128 {
+ var c complex128
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+func testComplex64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 4i
+
+ if want, got := a, complex64_ssa(a, b, true); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex64_ssa(a, b, false); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func testComplex128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 4i
+
+ if want, got := a, complex128_ssa(a, b, true); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex128_ssa(a, b, false); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func slice_ssa(a, b []byte, x bool) []byte {
+ var s []byte
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testSlice(t *testing.T) {
+ a := []byte{3, 4, 5}
+ b := []byte{7, 8, 9}
+ if want, got := byte(3), slice_ssa(a, b, true)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := byte(7), slice_ssa(a, b, false)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func interface_ssa(a, b interface{}, x bool) interface{} {
+ var s interface{}
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testInterface(t *testing.T) {
+ a := interface{}(3)
+ b := interface{}(4)
+ if want, got := 3, interface_ssa(a, b, true).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := 4, interface_ssa(a, b, false).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func TestCompound(t *testing.T) {
+ testString(t)
+ testSlice(t)
+ testInterface(t)
+ testComplex64(t)
+ testComplex128(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/copy_test.go b/src/cmd/compile/internal/gc/testdata/copy_test.go
new file mode 100644
index 0000000..c29611d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/copy_test.go
@@ -0,0 +1,760 @@
+// Code generated by gen/copyGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
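+// Each TN struct sandwiches an N-byte mid array between two 8-byte guard
+// arrays; the tests copy into mid and then compare the whole struct, so a
+// copy that writes outside its target region is also detected.
+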
+type T1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1copy_ssa(y, x *[1]byte) {
+ *y = *x
+}
+func testCopy1(t *testing.T) {
+ a := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{0}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1]byte{100}
+ t1copy_ssa(&a.mid, &x)
+ want := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{100}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func t2copy_ssa(y, x *[2]byte) {
+ *y = *x
+}
+func testCopy2(t *testing.T) {
+ a := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{0, 1}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [2]byte{100, 101}
+ t2copy_ssa(&a.mid, &x)
+ want := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{100, 101}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t2copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func t3copy_ssa(y, x *[3]byte) {
+ *y = *x
+}
+func testCopy3(t *testing.T) {
+ a := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{0, 1, 2}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [3]byte{100, 101, 102}
+ t3copy_ssa(&a.mid, &x)
+ want := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{100, 101, 102}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t3copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func t4copy_ssa(y, x *[4]byte) {
+ *y = *x
+}
+func testCopy4(t *testing.T) {
+ a := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{0, 1, 2, 3}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [4]byte{100, 101, 102, 103}
+ t4copy_ssa(&a.mid, &x)
+ want := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{100, 101, 102, 103}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t4copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func t5copy_ssa(y, x *[5]byte) {
+ *y = *x
+}
+func testCopy5(t *testing.T) {
+ a := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{0, 1, 2, 3, 4}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [5]byte{100, 101, 102, 103, 104}
+ t5copy_ssa(&a.mid, &x)
+ want := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{100, 101, 102, 103, 104}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t5copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func t6copy_ssa(y, x *[6]byte) {
+ *y = *x
+}
+func testCopy6(t *testing.T) {
+ a := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{0, 1, 2, 3, 4, 5}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [6]byte{100, 101, 102, 103, 104, 105}
+ t6copy_ssa(&a.mid, &x)
+ want := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{100, 101, 102, 103, 104, 105}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t6copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func t7copy_ssa(y, x *[7]byte) {
+ *y = *x
+}
+func testCopy7(t *testing.T) {
+ a := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{0, 1, 2, 3, 4, 5, 6}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [7]byte{100, 101, 102, 103, 104, 105, 106}
+ t7copy_ssa(&a.mid, &x)
+ want := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{100, 101, 102, 103, 104, 105, 106}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t7copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func t8copy_ssa(y, x *[8]byte) {
+ *y = *x
+}
+func testCopy8(t *testing.T) {
+ a := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{0, 1, 2, 3, 4, 5, 6, 7}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [8]byte{100, 101, 102, 103, 104, 105, 106, 107}
+ t8copy_ssa(&a.mid, &x)
+ want := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{100, 101, 102, 103, 104, 105, 106, 107}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t8copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func t9copy_ssa(y, x *[9]byte) {
+ *y = *x
+}
+func testCopy9(t *testing.T) {
+ a := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}
+ t9copy_ssa(&a.mid, &x)
+ want := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t9copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func t10copy_ssa(y, x *[10]byte) {
+ *y = *x
+}
+func testCopy10(t *testing.T) {
+ a := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}
+ t10copy_ssa(&a.mid, &x)
+ want := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t10copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func t15copy_ssa(y, x *[15]byte) {
+ *y = *x
+}
+func testCopy15(t *testing.T) {
+ a := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}
+ t15copy_ssa(&a.mid, &x)
+ want := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t15copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func t16copy_ssa(y, x *[16]byte) {
+ *y = *x
+}
+func testCopy16(t *testing.T) {
+ a := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}
+ t16copy_ssa(&a.mid, &x)
+ want := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t16copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func t17copy_ssa(y, x *[17]byte) {
+ *y = *x
+}
+func testCopy17(t *testing.T) {
+ a := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}
+ t17copy_ssa(&a.mid, &x)
+ want := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t17copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func t23copy_ssa(y, x *[23]byte) {
+ *y = *x
+}
+func testCopy23(t *testing.T) {
+ a := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t23copy_ssa(&a.mid, &x)
+ want := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t23copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func t24copy_ssa(y, x *[24]byte) {
+ *y = *x
+}
+func testCopy24(t *testing.T) {
+ a := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t24copy_ssa(&a.mid, &x)
+ want := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t24copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func t25copy_ssa(y, x *[25]byte) {
+ *y = *x
+}
+func testCopy25(t *testing.T) {
+ a := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t25copy_ssa(&a.mid, &x)
+ want := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t25copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func t31copy_ssa(y, x *[31]byte) {
+ *y = *x
+}
+func testCopy31(t *testing.T) {
+ a := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t31copy_ssa(&a.mid, &x)
+ want := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t31copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func t32copy_ssa(y, x *[32]byte) {
+ *y = *x
+}
+func testCopy32(t *testing.T) {
+ a := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t32copy_ssa(&a.mid, &x)
+ want := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t32copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func t33copy_ssa(y, x *[33]byte) {
+ *y = *x
+}
+func testCopy33(t *testing.T) {
+ a := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t33copy_ssa(&a.mid, &x)
+ want := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t33copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func t63copy_ssa(y, x *[63]byte) {
+ *y = *x
+}
+func testCopy63(t *testing.T) {
+ a := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}
+ t63copy_ssa(&a.mid, &x)
+ want := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t63copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func t64copy_ssa(y, x *[64]byte) {
+ *y = *x
+}
+func testCopy64(t *testing.T) {
+ a := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}
+ t64copy_ssa(&a.mid, &x)
+ want := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t64copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func t65copy_ssa(y, x *[65]byte) {
+ *y = *x
+}
+func testCopy65(t *testing.T) {
+ a := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}
+ t65copy_ssa(&a.mid, &x)
+ want := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t65copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1023copy_ssa(y, x *[1023]byte) {
+ *y = *x
+}
+func testCopy1023(t *testing.T) {
+ a := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t1023copy_ssa(&a.mid, &x)
+ want := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1023copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1024copy_ssa(y, x *[1024]byte) {
+ *y = *x
+}
+func testCopy1024(t *testing.T) {
+ a := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t1024copy_ssa(&a.mid, &x)
+ want := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1024copy got=%v, want %v\n", a, want)
+ }
+}
+
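+// The cases below repeat one pattern for increasing array sizes: each T<size>
+// struct sandwiches the copied [<size>]byte field between fixed pre/post guard
+// bytes (201..208 and 211..218), the //go:noinline t<size>copy_ssa helper keeps
+// the copy from being inlined away so the compiled copy code is exercised, and
+// testCopy<size> compares the whole struct, catching both a wrong payload and
+// any stray writes into the guard bytes.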
+type T1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1025copy_ssa(y, x *[1025]byte) {
+ *y = *x
+}
+func testCopy1025(t *testing.T) {
+ a := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t1025copy_ssa(&a.mid, &x)
+ want := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1025copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1031 struct {
+ pre [8]byte
+ mid [1031]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1031copy_ssa(y, x *[1031]byte) {
+ *y = *x
+}
+func testCopy1031(t *testing.T) {
+ a := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t1031copy_ssa(&a.mid, &x)
+ want := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1031copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1032 struct {
+ pre [8]byte
+ mid [1032]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1032copy_ssa(y, x *[1032]byte) {
+ *y = *x
+}
+func testCopy1032(t *testing.T) {
+ a := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t1032copy_ssa(&a.mid, &x)
+ want := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1032copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1033 struct {
+ pre [8]byte
+ mid [1033]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1033copy_ssa(y, x *[1033]byte) {
+ *y = *x
+}
+func testCopy1033(t *testing.T) {
+ a := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t1033copy_ssa(&a.mid, &x)
+ want := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1033copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1039 struct {
+ pre [8]byte
+ mid [1039]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1039copy_ssa(y, x *[1039]byte) {
+ *y = *x
+}
+func testCopy1039(t *testing.T) {
+ a := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}
+ t1039copy_ssa(&a.mid, &x)
+ want := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1039copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1040 struct {
+ pre [8]byte
+ mid [1040]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1040copy_ssa(y, x *[1040]byte) {
+ *y = *x
+}
+func testCopy1040(t *testing.T) {
+ a := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}
+ t1040copy_ssa(&a.mid, &x)
+ want := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1040copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1041 struct {
+ pre [8]byte
+ mid [1041]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1041copy_ssa(y, x *[1041]byte) {
+ *y = *x
+}
+func testCopy1041(t *testing.T) {
+ a := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}
+ t1041copy_ssa(&a.mid, &x)
+ want := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1041copy got=%v, want %v\n", a, want)
+ }
+}
+
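+// tu2copy_ssa and the tuNcopy_ssa variants below copy a small byte array
+// passed by value into *x when docopy is true.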
+//go:noinline
+func tu2copy_ssa(docopy bool, data [2]byte, x *[2]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy2(t *testing.T) {
+ var a [2]byte
+ t2 := [2]byte{2, 3}
+ tu2copy_ssa(true, t2, &a)
+ want2 := [2]byte{2, 3}
+ if a != want2 {
+ t.Errorf("tu2copy got=%v, want %v\n", a, want2)
+ }
+}
+
+//go:noinline
+func tu3copy_ssa(docopy bool, data [3]byte, x *[3]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy3(t *testing.T) {
+ var a [3]byte
+ t3 := [3]byte{3, 4, 5}
+ tu3copy_ssa(true, t3, &a)
+ want3 := [3]byte{3, 4, 5}
+ if a != want3 {
+ t.Errorf("tu3copy got=%v, want %v\n", a, want3)
+ }
+}
+
+//go:noinline
+func tu4copy_ssa(docopy bool, data [4]byte, x *[4]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy4(t *testing.T) {
+ var a [4]byte
+ t4 := [4]byte{4, 5, 6, 7}
+ tu4copy_ssa(true, t4, &a)
+ want4 := [4]byte{4, 5, 6, 7}
+ if a != want4 {
+ t.Errorf("tu4copy got=%v, want %v\n", a, want4)
+ }
+}
+
+//go:noinline
+func tu5copy_ssa(docopy bool, data [5]byte, x *[5]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy5(t *testing.T) {
+ var a [5]byte
+ t5 := [5]byte{5, 6, 7, 8, 9}
+ tu5copy_ssa(true, t5, &a)
+ want5 := [5]byte{5, 6, 7, 8, 9}
+ if a != want5 {
+ t.Errorf("tu5copy got=%v, want %v\n", a, want5)
+ }
+}
+
+//go:noinline
+func tu6copy_ssa(docopy bool, data [6]byte, x *[6]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy6(t *testing.T) {
+ var a [6]byte
+ t6 := [6]byte{6, 7, 8, 9, 10, 11}
+ tu6copy_ssa(true, t6, &a)
+ want6 := [6]byte{6, 7, 8, 9, 10, 11}
+ if a != want6 {
+ t.Errorf("tu6copy got=%v, want %v\n", a, want6)
+ }
+}
+
+//go:noinline
+func tu7copy_ssa(docopy bool, data [7]byte, x *[7]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy7(t *testing.T) {
+ var a [7]byte
+ t7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ tu7copy_ssa(true, t7, &a)
+ want7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ if a != want7 {
+ t.Errorf("tu7copy got=%v, want %v\n", a, want7)
+ }
+}
+func TestCopy(t *testing.T) {
+ testCopy1(t)
+ testCopy2(t)
+ testCopy3(t)
+ testCopy4(t)
+ testCopy5(t)
+ testCopy6(t)
+ testCopy7(t)
+ testCopy8(t)
+ testCopy9(t)
+ testCopy10(t)
+ testCopy15(t)
+ testCopy16(t)
+ testCopy17(t)
+ testCopy23(t)
+ testCopy24(t)
+ testCopy25(t)
+ testCopy31(t)
+ testCopy32(t)
+ testCopy33(t)
+ testCopy63(t)
+ testCopy64(t)
+ testCopy65(t)
+ testCopy1023(t)
+ testCopy1024(t)
+ testCopy1025(t)
+ testCopy1031(t)
+ testCopy1032(t)
+ testCopy1033(t)
+ testCopy1039(t)
+ testCopy1040(t)
+ testCopy1041(t)
+ testUnalignedCopy2(t)
+ testUnalignedCopy3(t)
+ testUnalignedCopy4(t)
+ testUnalignedCopy5(t)
+ testUnalignedCopy6(t)
+ testUnalignedCopy7(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/ctl_test.go b/src/cmd/compile/internal/gc/testdata/ctl_test.go
new file mode 100644
index 0000000..16d571c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/ctl_test.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test control flow
+
+package main
+
+import "testing"
+
+// nor_ssa calculates NOR(a, b).
+// It is implemented in a way that generates
+// phi control values.
+func nor_ssa(a, b bool) bool {
+ var c bool
+ if a {
+ c = true
+ }
+ if b {
+ c = true
+ }
+ if c {
+ return false
+ }
+ return true
+}
+
+func testPhiControl(t *testing.T) {
+ tests := [...][3]bool{ // a, b, want
+ {false, false, true},
+ {true, false, false},
+ {false, true, false},
+ {true, true, false},
+ }
+ for _, test := range tests {
+ a, b := test[0], test[1]
+ got := nor_ssa(a, b)
+ want := test[2]
+ if want != got {
+ t.Errorf("nor(%t, %t)=%t got %t", a, b, want, got)
+ }
+ }
+}
+
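+// emptyRange_ssa ranges over a slice that may be empty; the test checks that a
+// range loop whose body never executes still compiles and runs correctly.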
+func emptyRange_ssa(b []byte) bool {
+ for _, x := range b {
+ _ = x
+ }
+ return true
+}
+
+func testEmptyRange(t *testing.T) {
+ if !emptyRange_ssa([]byte{}) {
+ t.Errorf("emptyRange_ssa([]byte{})=false, want true")
+ }
+}
+
+func switch_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret += 5
+ case 4:
+ ret += 4
+ case 3:
+ ret += 3
+ case 2:
+ ret += 2
+ case 1:
+ ret += 1
+ }
+ return ret
+
+}
+
+func fallthrough_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret++
+ fallthrough
+ case 4:
+ ret++
+ fallthrough
+ case 3:
+ ret++
+ fallthrough
+ case 2:
+ ret++
+ fallthrough
+ case 1:
+ ret++
+ }
+ return ret
+
+}
+
+func testFallthrough(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := fallthrough_ssa(i); got != i {
+ t.Errorf("fallthrough_ssa(%d) = %d, wanted %d", i, got, i)
+ }
+ }
+}
+
+func testSwitch(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := switch_ssa(i); got != i {
+ t.Errorf("switch_ssa(%d) = %d, wanted %d", i, got, i)
+ }
+ }
+}
+
+type junk struct {
+ step int
+}
+
+// flagOverwrite_ssa is intended to reproduce an issue seen where a XOR
+// was scheduled between a compare and branch, clearing flags.
+//go:noinline
+func flagOverwrite_ssa(s *junk, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = 0
+ return 1
+ }
+ if c == 'e' || c == 'E' {
+ s.step = 0
+ return 2
+ }
+ s.step = 0
+ return 3
+}
+
+func testFlagOverwrite(t *testing.T) {
+ j := junk{}
+ if got := flagOverwrite_ssa(&j, ' '); got != 3 {
+ t.Errorf("flagOverwrite_ssa = %d, wanted 3", got)
+ }
+}
+
+func TestCtl(t *testing.T) {
+ testPhiControl(t)
+ testEmptyRange(t)
+
+ testSwitch(t)
+ testFallthrough(t)
+
+ testFlagOverwrite(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go b/src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go
new file mode 100644
index 0000000..308e897
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/deferNoReturn_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that a defer in a function with no return
+// statement will compile correctly.
+
+package main
+
+import "testing"
+
+func deferNoReturn_ssa() {
+ defer func() { println("returned") }()
+ for {
+ println("loop")
+ }
+}
+
+func TestDeferNoReturn(t *testing.T) {
+ // This is a compile-time test, no runtime testing required.
+}
diff --git a/src/cmd/compile/internal/gc/testdata/divbyzero_test.go b/src/cmd/compile/internal/gc/testdata/divbyzero_test.go
new file mode 100644
index 0000000..ee848b3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/divbyzero_test.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+func checkDivByZero(f func()) (divByZero bool) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: integer divide by zero" {
+ divByZero = true
+ }
+ }
+ }()
+ f()
+ return false
+}
+
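+// The helpers below are marked go:noinline so the divisions are performed by
+// the compiled code under test rather than being simplified away, letting the
+// runtime divide-by-zero checks actually fire.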
+//go:noinline
+func div_a(i uint, s []int) int {
+ return s[i%uint(len(s))]
+}
+
+//go:noinline
+func div_b(i uint, j uint) uint {
+ return i / j
+}
+
+//go:noinline
+func div_c(i int) int {
+ return 7 / (i - i)
+}
+
+func TestDivByZero(t *testing.T) {
+ if got := checkDivByZero(func() { div_b(7, 0) }); !got {
+ t.Errorf("expected div by zero for b(7, 0), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_b(7, 7) }); got {
+ t.Errorf("expected no error for b(7, 7), got div by zero\n")
+ }
+ if got := checkDivByZero(func() { div_a(4, nil) }); !got {
+ t.Errorf("expected div by zero for a(4, nil), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_c(5) }); !got {
+ t.Errorf("expected div by zero for c(5), got no error\n")
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/dupLoad_test.go b/src/cmd/compile/internal/gc/testdata/dupLoad_test.go
new file mode 100644
index 0000000..d859123
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/dupLoad_test.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that we don't split a single
+// load up into two separate loads.
+
+package main
+
+import "testing"
+
+//go:noinline
+func read1(b []byte) (uint16, uint16) {
+ // There is only a single read of b[0]. The two
+ // returned values must have the same low byte.
+ v := b[0]
+ return uint16(v), uint16(v) | uint16(b[1])<<8
+}
+
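+// main1 races a writer that stores the same byte into b[0] and b[1] against a
+// reader calling read1. If the compiler split the single load of b[0] into two
+// loads, the two returned values could end up with different low bytes.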
+func main1(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read1(b)
+ if byte(x) != byte(y) {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+//go:noinline
+func read2(b []byte) (uint16, uint16) {
+ // There is only a single read of b[1]. The two
+ // returned values must have the same high byte.
+ v := uint16(b[1]) << 8
+ return v, uint16(b[0]) | v
+}
+
+func main2(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read2(b)
+ if x&0xff00 != y&0xff00 {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+func TestDupLoad(t *testing.T) {
+ main1(t)
+ main2(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go
new file mode 100644
index 0000000..ad22601
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go
@@ -0,0 +1,315 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
+// make fake flow graph.
+
+// The blocks of the flow graph are designated with letters A
+// through Z, always including A (start block) and Z (exit
+// block) The specification of a flow graph is a comma-
+// separated list of block successor words, for blocks ordered
+// A, B, C etc, where each block except Z has one or two
+// successors, and any block except A can be a target. Within
+// the generated code, each block with two successors includes
+// a conditional testing x & 1 != 0 (x is the input parameter
+// to the generated function) and also unconditionally shifts x
+// right by one, so that different inputs generate different
+// execution paths, including loops. Every block inverts a
+// global binary to ensure it is not empty. For a flow graph
+// with J words (J+1 blocks), a J-1 bit serial number specifies
+// which blocks (not including A and Z) include an increment of
+// the return variable y by increasing powers of 10, and a
+// different version of the test function is created for each
+// of the 2-to-the-(J-1) serial numbers.
+
+// For each generated function a compact summary is also
+// created so that the generated function can be simulated
+// with a simple interpreter to sanity check the behavior of
+// the compiled code.
+
+// For example:
+
+// func BC_CD_BE_BZ_CZ101(x int64) int64 {
+// y := int64(0)
+// var b int64
+// _ = b
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto C
+// }
+// goto B
+// B:
+// glob = !glob
+// y += 1
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto D
+// }
+// goto C
+// C:
+// glob = !glob
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto E
+// }
+// goto B
+// D:
+// glob = !glob
+// y += 10
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto B
+// E:
+// glob = !glob
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto C
+// Z:
+// return y
+// }
+
+// {f:BC_CD_BE_BZ_CZ101,
+// maxin:32, blocks:[]blo{
+// blo{inc:0, cond:true, succs:[2]int64{1, 2}},
+// blo{inc:1, cond:true, succs:[2]int64{2, 3}},
+// blo{inc:0, cond:true, succs:[2]int64{1, 4}},
+// blo{inc:10, cond:true, succs:[2]int64{1, 25}},
+// blo{inc:0, cond:true, succs:[2]int64{2, 25}},}},
+
+var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func blocks(spec string) (blocks []string, fnameBase string) {
+ spec = strings.ToUpper(spec)
+ blocks = strings.Split(spec, ",")
+ fnameBase = strings.Replace(spec, ",", "_", -1)
+ return
+}
+
+func makeFunctionFromFlowGraph(blocks []blo, fname string) string {
+ s := ""
+
+ for j := range blocks {
+ // begin block
+ if j == 0 {
+ // block A, implicit label
+ s += `
+func ` + fname + `(x int64) int64 {
+ y := int64(0)
+ var b int64
+ _ = b`
+ } else {
+ // block B,C, etc, explicit label w/ conditional increment
+ l := labels[j : j+1]
+ yeq := `
+ // no y increment`
+ if blocks[j].inc != 0 {
+ yeq = `
+ y += ` + fmt.Sprintf("%d", blocks[j].inc)
+ }
+
+ s += `
+` + l + `:
+ glob = !glob` + yeq
+ }
+
+ // edges to successors
+ if blocks[j].cond { // conditionally branch to second successor
+ s += `
+ b = x & 1
+ x = x >> 1
+ if b != 0 {` + `
+ goto ` + string(labels[blocks[j].succs[1]]) + `
+ }`
+
+ }
+ // branch to first successor
+ s += `
+ goto ` + string(labels[blocks[j].succs[0]])
+ }
+
+ // end block (Z)
+ s += `
+Z:
+ return y
+}
+`
+ return s
+}
+
+var graphs []string = []string{
+ "Z", "BZ,Z", "B,BZ", "BZ,BZ",
+ "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB",
+
+ "BC,C,Z", "BC,BC,Z", "BC,BC,BZ",
+ "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ",
+ "BZ,C,Z", "BZ,BC,Z", "BZ,CZ,Z",
+ "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ",
+ "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ",
+
+ "BC,CD,BE,BZ,CZ",
+ "BC,BD,CE,CZ,BZ",
+ "BC,BD,CE,FZ,GZ,F,G",
+ "BC,BD,CE,FZ,GZ,G,F",
+
+ "BC,DE,BE,FZ,FZ,Z",
+ "BC,DE,BE,FZ,ZF,Z",
+ "BC,DE,BE,ZF,FZ,Z",
+ "BC,DE,EB,FZ,FZ,Z",
+ "BC,ED,BE,FZ,FZ,Z",
+ "CB,DE,BE,FZ,FZ,Z",
+
+ "CB,ED,BE,FZ,FZ,Z",
+ "BC,ED,EB,FZ,ZF,Z",
+ "CB,DE,EB,ZF,FZ,Z",
+ "CB,ED,EB,FZ,FZ,Z",
+
+ "BZ,CD,CD,CE,BZ",
+ "EC,DF,FG,ZC,GB,BE,FD",
+ "BH,CF,DG,HE,BF,CG,DH,BZ",
+}
+
+// blo describes a block in the generated/interpreted code
+type blo struct {
+ inc int64 // increment amount
+ cond bool // block ends in conditional
+ succs [2]int64
+}
+
+// strings2blocks converts a slice of strings specifying
+// successors into a slice of blo encoding the blocks in a
+// common form easy to execute or interpret.
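+// The bits of i select which blocks other than A and Z increment y; the
+// increments are successive powers of 10, so a function's return value records
+// which blocks executed. The returned count of conditional blocks bounds the
+// useful range of test inputs.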
+func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) {
+ bs = make([]blo, len(blocks))
+ edge := int64(1)
+ cond = 0
+ k := uint(0)
+ for j, s := range blocks {
+ if j != 0 {
+ if (i>>k)&1 != 0 {
+ bs[j].inc = edge
+ edge *= 10
+ }
+ k++
+ }
+ if len(s) > 1 {
+ bs[j].succs[1] = int64(blocks[j][1] - 'A')
+ bs[j].cond = true
+ cond++
+ }
+ bs[j].succs[0] = int64(blocks[j][0] - 'A')
+ }
+ return bs, cond
+}
+
+// fmtBlocks writes out the blocks for consumption in the generated test
+func fmtBlocks(bs []blo) string {
+ s := "[]blo{"
+ for _, b := range bs {
+ s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1])
+ }
+ s += "}"
+ return s
+}
+
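+// main writes a complete test program to standard output: one function per
+// (graph, serial number) pair, a table describing each function's blocks, a
+// small interpreter, and a main that checks every compiled function against
+// the interpreter over a range of inputs.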
+func main() {
+ fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go.
+package main
+import "fmt"
+var glob bool
+`)
+ s := "var funs []fun = []fun{"
+ for _, g := range graphs {
+ split, fnameBase := blocks(g)
+ nconfigs := 1 << uint(len(split)-1)
+
+ for i := 0; i < nconfigs; i++ {
+ fname := fnameBase + fmt.Sprintf("%b", i)
+ bs, k := strings2blocks(split, fname, i)
+ fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname))
+ s += `
+ {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<<k) + `, blocks:` + fmtBlocks(bs) + `},`
+ }
+
+ }
+ s += `}
+`
+ // write types for name+array tables.
+ fmt.Printf("%s",
+ `
+type blo struct {
+ inc int64
+ cond bool
+ succs [2]int64
+}
+type fun struct {
+ f func(int64) int64
+ maxin int64
+ blocks []blo
+}
+`)
+ // write table of function names and blo arrays.
+ fmt.Printf("%s", s)
+
+ // write interpreter and main/test
+ fmt.Printf("%s", `
+func interpret(blocks []blo, x int64) (int64, bool) {
+ y := int64(0)
+ last := int64(25) // 'Z'-'A'
+ j := int64(0)
+ for i := 0; i < 4*len(blocks); i++ {
+ b := blocks[j]
+ y += b.inc
+ next := b.succs[0]
+ if b.cond {
+ c := x&1 != 0
+ x = x>>1
+ if c {
+ next = b.succs[1]
+ }
+ }
+ if next == last {
+ return y, true
+ }
+ j = next
+ }
+ return -1, false
+}
+
+func main() {
+ sum := int64(0)
+ for i, f := range funs {
+ for x := int64(0); x < 16*f.maxin; x++ {
+ y, ok := interpret(f.blocks, x)
+ if ok {
+ yy := f.f(x)
+ if y != yy {
+ fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks)
+ return
+ }
+ sum += y
+ }
+ }
+ }
+// fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum)
+}
+`)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/fp_test.go b/src/cmd/compile/internal/gc/testdata/fp_test.go
new file mode 100644
index 0000000..7d61a80
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/fp_test.go
@@ -0,0 +1,1773 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests floating point arithmetic expressions
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+// manysub_ssa is designed to tickle bugs that depend on register
+// pressure or unfriendly operand ordering in registers (and at
+// least once it succeeded in this).
+//go:noinline
+func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) {
+ aa = a + 11.0 - a
+ ab = a - b
+ ac = a - c
+ ad = a - d
+ ba = b - a
+ bb = b + 22.0 - b
+ bc = b - c
+ bd = b - d
+ ca = c - a
+ cb = c - b
+ cc = c + 33.0 - c
+ cd = c - d
+ da = d - a
+ db = d - b
+ dc = d - c
+ dd = d + 44.0 - d
+ return
+}
+
+// fpspill_ssa attempts to trigger a bug where phis with floating point values
+// were stored in non-fp registers causing an error in doasm.
+//go:noinline
+func fpspill_ssa(a int) float64 {
+
+ ret := -1.0
+ switch a {
+ case 0:
+ ret = 1.0
+ case 1:
+ ret = 1.1
+ case 2:
+ ret = 1.2
+ case 3:
+ ret = 1.3
+ case 4:
+ ret = 1.4
+ case 5:
+ ret = 1.5
+ case 6:
+ ret = 1.6
+ case 7:
+ ret = 1.7
+ case 8:
+ ret = 1.8
+ case 9:
+ ret = 1.9
+ case 10:
+ ret = 1.10
+ case 11:
+ ret = 1.11
+ case 12:
+ ret = 1.12
+ case 13:
+ ret = 1.13
+ case 14:
+ ret = 1.14
+ case 15:
+ ret = 1.15
+ case 16:
+ ret = 1.16
+ }
+ return ret
+}
+
+//go:noinline
+func add64_ssa(a, b float64) float64 {
+ return a + b
+}
+
+//go:noinline
+func mul64_ssa(a, b float64) float64 {
+ return a * b
+}
+
+//go:noinline
+func sub64_ssa(a, b float64) float64 {
+ return a - b
+}
+
+//go:noinline
+func div64_ssa(a, b float64) float64 {
+ return a / b
+}
+
+//go:noinline
+func neg64_ssa(a, b float64) float64 {
+ return -a + -1*b
+}
+
+//go:noinline
+func add32_ssa(a, b float32) float32 {
+ return a + b
+}
+
+//go:noinline
+func mul32_ssa(a, b float32) float32 {
+ return a * b
+}
+
+//go:noinline
+func sub32_ssa(a, b float32) float32 {
+ return a - b
+}
+
+//go:noinline
+func div32_ssa(a, b float32) float32 {
+ return a / b
+}
+
+//go:noinline
+func neg32_ssa(a, b float32) float32 {
+ return -a + -1*b
+}
+
+//go:noinline
+func conv2Float64_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float32) (aa, bb, cc, dd, ee, ff, gg, hh, ii float64) {
+ aa = float64(a)
+ bb = float64(b)
+ cc = float64(c)
+ hh = float64(h)
+ dd = float64(d)
+ ee = float64(e)
+ ff = float64(f)
+ gg = float64(g)
+ ii = float64(i)
+ return
+}
+
+//go:noinline
+func conv2Float32_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float64) (aa, bb, cc, dd, ee, ff, gg, hh, ii float32) {
+ aa = float32(a)
+ bb = float32(b)
+ cc = float32(c)
+ dd = float32(d)
+ ee = float32(e)
+ ff = float32(f)
+ gg = float32(g)
+ hh = float32(h)
+ ii = float32(i)
+ return
+}
+
+func integer2floatConversions(t *testing.T) {
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll64(t, "zero64", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll64(t, "one64", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll32(t, "zero32", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll32(t, "one32", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect64(t, "a", a, 127)
+ expect64(t, "b", b, 255)
+ expect64(t, "c", c, 32767)
+ expect64(t, "d", d, 65535)
+ expect64(t, "e", e, float64(int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xffffffff)))
+ expect64(t, "g", g, float64(int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xffffffffffffffff)))
+ expect64(t, "i", i, float64(float32(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect64(t, "a", a, -128)
+ expect64(t, "b", b, 254)
+ expect64(t, "c", c, -32768)
+ expect64(t, "d", d, 65534)
+ expect64(t, "e", e, float64(^int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xfffffffe)))
+ expect64(t, "g", g, float64(^int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xfffffffffffff401)))
+ expect64(t, "i", i, float64(float32(1.5e-45)))
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect32(t, "a", a, 127)
+ expect32(t, "b", b, 255)
+ expect32(t, "c", c, 32767)
+ expect32(t, "d", d, 65535)
+ expect32(t, "e", e, float32(int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xffffffff)))
+ expect32(t, "g", g, float32(int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xffffffffffffffff)))
+ expect32(t, "i", i, float32(float64(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect32(t, "a", a, -128)
+ expect32(t, "b", b, 254)
+ expect32(t, "c", c, -32768)
+ expect32(t, "d", d, 65534)
+ expect32(t, "e", e, float32(^int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xfffffffe)))
+ expect32(t, "g", g, float32(^int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xfffffffffffff401)))
+ expect32(t, "i", i, float32(float64(1.5e-45)))
+ }
+}
+
+func multiplyAdd(t *testing.T) {
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float32() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float32 }{
+ {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486
+ {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288
+ {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521
+ {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598
+ {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046
+ {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315
+ {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465
+ {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451
+ {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.3609436
+ {0.29400632, 0.75316125, 0.15096405, 0.3723982}, // fused multiply-add result: 0.37239823
+ }
+ check := func(s string, got, expected float32) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float32(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float32) float32 {
+ return float32(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float32(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float32) float32 {
+ z += float32(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float64() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float64 }{
+ {0.4688898449024232, 0.28303415118044517, 0.29310185733681576, 0.42581369658590373}, // fused multiply-add result: 0.4258136965859037
+ {0.7886049150193449, 0.3618054804803169, 0.8805431227416171, 1.1658647029293308}, // fused multiply-add result: 1.1658647029293305
+ {0.7302314772948083, 0.18292491645390843, 0.4283570818068078, 0.5619346137829748}, // fused multiply-add result: 0.5619346137829747
+ {0.6908388315056789, 0.7109071952999951, 0.5637795958152644, 1.0549018919252924}, // fused multiply-add result: 1.0549018919252926
+ {0.4584424785756506, 0.6001655953233308, 0.02626515060968944, 0.3014065536855481}, // fused multiply-add result: 0.30140655368554814
+ {0.539210105890946, 0.9756748149873165, 0.7507630564795985, 1.2768567767840384}, // fused multiply-add result: 1.2768567767840386
+ {0.7830349733960021, 0.3932509992288867, 0.1304138461737918, 0.4383431318929343}, // fused multiply-add result: 0.43834313189293433
+ {0.6841751300974551, 0.6530402051353608, 0.524499759549865, 0.9712936268572192}, // fused multiply-add result: 0.9712936268572193
+ {0.3691117091643448, 0.826454125634742, 0.34768170859156955, 0.6527356034505334}, // fused multiply-add result: 0.6527356034505333
+ {0.16867966833433606, 0.33136826030698385, 0.8279280961505588, 0.8838231843956668}, // fused multiply-add result: 0.8838231843956669
+ }
+ check := func(s string, got, expected float64) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float64(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float64) float64 {
+ return float64(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float64(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float64) float64 {
+ z += float64(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a complex128() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct {
+ x, y float64
+ res complex128
+ }{
+ {0.6046602879796196, 0.9405090880450124, (2.754489951983871 + 3i)}, // fused multiply-add result: (2.7544899519838713 + 3i)
+ {0.09696951891448456, 0.30091186058528707, (0.5918204173287407 + 3i)}, // fused multiply-add result: (0.5918204173287408 + 3i)
+ {0.544155573000885, 0.27850762181610883, (1.910974340818764 + 3i)}, // fused multiply-add result: (1.9109743408187638 + 3i)
+ {0.9769168685862624, 0.07429099894984302, (3.0050416047086297 + 3i)}, // fused multiply-add result: (3.00504160470863 + 3i)
+ {0.9269868035744142, 0.9549454404167818, (3.735905851140024 + 3i)}, // fused multiply-add result: (3.7359058511400245 + 3i)
+ {0.7109071952999951, 0.5637795958152644, (2.69650118171525 + 3i)}, // fused multiply-add result: (2.6965011817152496 + 3i)
+ {0.7558235074915978, 0.40380328579570035, (2.671273808270494 + 3i)}, // fused multiply-add result: (2.6712738082704934 + 3i)
+ {0.13065111702897217, 0.9859647293402467, (1.3779180804271633 + 3i)}, // fused multiply-add result: (1.3779180804271631 + 3i)
+ {0.8963417453962161, 0.3220839705208817, (3.0111092067095298 + 3i)}, // fused multiply-add result: (3.01110920670953 + 3i)
+ {0.39998376285699544, 0.497868113342702, (1.697819401913688 + 3i)}, // fused multiply-add result: (1.6978194019136883 + 3i)
+ }
+ check := func(s string, got, expected complex128) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %v, got %v\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("complex128(complex(%v, 1)*3) + complex(%v, 0)", t.x, t.y),
+ func(x, y float64) complex128 {
+ return complex128(complex(x, 1)*3) + complex(y, 0)
+ }(t.x, t.y),
+ t.res)
+
+ check(
+ fmt.Sprintf("z := complex(%v, 0); z += complex128(complex(%v, 1) * 3)", t.y, t.x),
+ func(x, y float64) complex128 {
+ z := complex(y, 0)
+ z += complex128(complex(x, 1) * 3)
+ return z
+ }(t.x, t.y),
+ t.res)
+ }
+ }
+}
+
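+// Each constant below occupies a distinct hex digit of a uint64. The
+// comparesNN_ssa functions add one constant per true comparison, so every
+// returned value (lt, le, eq, ne, ge, gt) reads as a hex bitmap of the sixteen
+// operand pairs.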
+const (
+ aa = 0x1000000000000000
+ ab = 0x100000000000000
+ ac = 0x10000000000000
+ ad = 0x1000000000000
+ ba = 0x100000000000
+ bb = 0x10000000000
+ bc = 0x1000000000
+ bd = 0x100000000
+ ca = 0x10000000
+ cb = 0x1000000
+ cc = 0x100000
+ cd = 0x10000
+ da = 0x1000
+ db = 0x100
+ dc = 0x10
+ dd = 0x1
+)
+
+//go:noinline
+func compares64_ssa(a, b, c, d float64) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func compares32_ssa(a, b, c, d float32) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func le64_ssa(x, y float64) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge64_ssa(x, y float64) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt64_ssa(x, y float64) bool {
+ return x < y
+}
+
+//go:noinline
+func gt64_ssa(x, y float64) bool {
+ return x > y
+}
+
+//go:noinline
+func eq64_ssa(x, y float64) bool {
+ return x == y
+}
+
+//go:noinline
+func ne64_ssa(x, y float64) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr64_ssa(x, y float64) float64 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr64_ssa(x, y float64) float64 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr64_ssa(x, y float64) float64 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr64_ssa(x, y float64) float64 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr64_ssa(x, y float64) float64 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr64_ssa(x, y float64) float64 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func le32_ssa(x, y float32) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge32_ssa(x, y float32) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt32_ssa(x, y float32) bool {
+ return x < y
+}
+
+//go:noinline
+func gt32_ssa(x, y float32) bool {
+ return x > y
+}
+
+//go:noinline
+func eq32_ssa(x, y float32) bool {
+ return x == y
+}
+
+//go:noinline
+func ne32_ssa(x, y float32) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr32_ssa(x, y float32) float32 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr32_ssa(x, y float32) float32 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr32_ssa(x, y float32) float32 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr32_ssa(x, y float32) float32 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr32_ssa(x, y float32) float32 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr32_ssa(x, y float32) float32 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func F32toU8_ssa(x float32) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F32toI8_ssa(x float32) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F32toU16_ssa(x float32) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F32toI16_ssa(x float32) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F32toU32_ssa(x float32) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F32toI32_ssa(x float32) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F32toU64_ssa(x float32) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F32toI64_ssa(x float32) int64 {
+ return int64(x)
+}
+
+//go:noinline
+func F64toU8_ssa(x float64) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F64toI8_ssa(x float64) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F64toU16_ssa(x float64) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F64toI16_ssa(x float64) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F64toU32_ssa(x float64) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F64toI32_ssa(x float64) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F64toU64_ssa(x float64) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F64toI64_ssa(x float64) int64 {
+ return int64(x)
+}
+
+func floatsToInts(t *testing.T, x float64, expected int64) {
+ y := float32(x)
+ expectInt64(t, "F64toI8", int64(F64toI8_ssa(x)), expected)
+ expectInt64(t, "F64toI16", int64(F64toI16_ssa(x)), expected)
+ expectInt64(t, "F64toI32", int64(F64toI32_ssa(x)), expected)
+ expectInt64(t, "F64toI64", int64(F64toI64_ssa(x)), expected)
+ expectInt64(t, "F32toI8", int64(F32toI8_ssa(y)), expected)
+ expectInt64(t, "F32toI16", int64(F32toI16_ssa(y)), expected)
+ expectInt64(t, "F32toI32", int64(F32toI32_ssa(y)), expected)
+ expectInt64(t, "F32toI64", int64(F32toI64_ssa(y)), expected)
+}
+
+func floatsToUints(t *testing.T, x float64, expected uint64) {
+ y := float32(x)
+ expectUint64(t, "F64toU8", uint64(F64toU8_ssa(x)), expected)
+ expectUint64(t, "F64toU16", uint64(F64toU16_ssa(x)), expected)
+ expectUint64(t, "F64toU32", uint64(F64toU32_ssa(x)), expected)
+ expectUint64(t, "F64toU64", uint64(F64toU64_ssa(x)), expected)
+ expectUint64(t, "F32toU8", uint64(F32toU8_ssa(y)), expected)
+ expectUint64(t, "F32toU16", uint64(F32toU16_ssa(y)), expected)
+ expectUint64(t, "F32toU32", uint64(F32toU32_ssa(y)), expected)
+ expectUint64(t, "F32toU64", uint64(F32toU64_ssa(y)), expected)
+}
+
+func floatingToIntegerConversionsTest(t *testing.T) {
+ floatsToInts(t, 0.0, 0)
+ floatsToInts(t, 0.5, 0)
+ floatsToInts(t, 0.9, 0)
+ floatsToInts(t, 1.0, 1)
+ floatsToInts(t, 1.5, 1)
+ floatsToInts(t, 127.0, 127)
+ floatsToInts(t, -1.0, -1)
+ floatsToInts(t, -128.0, -128)
+
+ floatsToUints(t, 0.0, 0)
+ floatsToUints(t, 1.0, 1)
+ floatsToUints(t, 255.0, 255)
+
+ for j := uint(0); j < 24; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int64(1<<62) | int64(1<<(62-j))
+ w := uint64(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint64(t, "2**62...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**62...", F64toU64_ssa(d), w)
+ expectInt64(t, "2**62...", F32toI64_ssa(f), v)
+ expectInt64(t, "2**62...", F64toI64_ssa(d), v)
+ expectInt64(t, "2**62...", F32toI64_ssa(-f), -v)
+ expectInt64(t, "2**62...", F64toI64_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
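+ // After doubling, the values are at least 1<<63 and exercise the
+ // unsigned conversion path that no int64 can represent.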
+ expectUint64(t, "2**63...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**63...", F64toU64_ssa(d), w)
+ }
+
+ for j := uint(0); j < 16; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int32(1<<30) | int32(1<<(30-j))
+ w := uint32(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint32(t, "2**30...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**30...", F64toU32_ssa(d), w)
+ expectInt32(t, "2**30...", F32toI32_ssa(f), v)
+ expectInt32(t, "2**30...", F64toI32_ssa(d), v)
+ expectInt32(t, "2**30...", F32toI32_ssa(-f), -v)
+ expectInt32(t, "2**30...", F64toI32_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint32(t, "2**31...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**31...", F64toU32_ssa(d), w)
+ }
+
+ for j := uint(0); j < 15; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int16(1<<14) | int16(1<<(14-j))
+ w := uint16(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint16(t, "2**14...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**14...", F64toU16_ssa(d), w)
+ expectInt16(t, "2**14...", F32toI16_ssa(f), v)
+ expectInt16(t, "2**14...", F64toI16_ssa(d), v)
+ expectInt16(t, "2**14...", F32toI16_ssa(-f), -v)
+ expectInt16(t, "2**14...", F64toI16_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint16(t, "2**15...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**15...", F64toU16_ssa(d), w)
+ }
+
+ expectInt32(t, "-2147483648", F32toI32_ssa(-2147483648), -2147483648)
+
+ expectInt32(t, "-2147483648", F64toI32_ssa(-2147483648), -2147483648)
+ expectInt32(t, "-2147483647", F64toI32_ssa(-2147483647), -2147483647)
+ expectUint32(t, "4294967295", F64toU32_ssa(4294967295), 4294967295)
+
+ expectInt16(t, "-32768", F64toI16_ssa(-32768), -32768)
+ expectInt16(t, "-32768", F32toI16_ssa(-32768), -32768)
+
+ // NB: these are harder to exercise for 32-bit integer boundaries because the float32 mantissa loses the low bits
+ expectInt16(t, "32767", F64toI16_ssa(32767), 32767)
+ expectInt16(t, "32767", F32toI16_ssa(32767), 32767)
+ expectUint16(t, "32767", F64toU16_ssa(32767), 32767)
+ expectUint16(t, "32767", F32toU16_ssa(32767), 32767)
+ expectUint16(t, "65535", F64toU16_ssa(65535), 65535)
+ expectUint16(t, "65535", F32toU16_ssa(65535), 65535)
+}
+
+func fail64(s string, f func(a, b float64) float64, a, b, e float64) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail64bool(s string, f func(a, b float64) bool, a, b float64, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32(s string, f func(a, b float32) float32, a, b, e float32) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32bool(s string, f func(a, b float32) bool, a, b float32, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func expect64(t *testing.T, s string, x, expected float64) {
+ if x != expected {
+ println("F64 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expect32(t *testing.T, s string, x, expected float32) {
+ if x != expected {
+ println("F32 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expectUint64(t *testing.T, s string, x, expected uint64) {
+ if x != expected {
+ fmt.Printf("U64 Expected 0x%016x for %s, got 0x%016x\n", expected, s, x)
+ }
+}
+
+func expectInt64(t *testing.T, s string, x, expected int64) {
+ if x != expected {
+ fmt.Printf("%s: Expected 0x%016x, got 0x%016x\n", s, expected, x)
+ }
+}
+
+func expectUint32(t *testing.T, s string, x, expected uint32) {
+ if x != expected {
+ fmt.Printf("U32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectInt32(t *testing.T, s string, x, expected int32) {
+ if x != expected {
+ fmt.Printf("I32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectUint16(t *testing.T, s string, x, expected uint16) {
+ if x != expected {
+ fmt.Printf("U16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectInt16(t *testing.T, s string, x, expected int16) {
+ if x != expected {
+ fmt.Printf("I16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectAll64(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float64) {
+ expect64(t, s+":a", a, expected)
+ expect64(t, s+":b", b, expected)
+ expect64(t, s+":c", c, expected)
+ expect64(t, s+":d", d, expected)
+ expect64(t, s+":e", e, expected)
+ expect64(t, s+":f", f, expected)
+ expect64(t, s+":g", g, expected)
+ expect64(t, s+":h", h, expected)
+ expect64(t, s+":i", i, expected)
+}
+
+func expectAll32(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float32) {
+ expect32(t, s+":a", a, expected)
+ expect32(t, s+":b", b, expected)
+ expect32(t, s+":c", c, expected)
+ expect32(t, s+":d", d, expected)
+ expect32(t, s+":e", e, expected)
+ expect32(t, s+":f", f, expected)
+ expect32(t, s+":g", g, expected)
+ expect32(t, s+":h", h, expected)
+ expect32(t, s+":i", i, expected)
+}
+
+var ev64 [2]float64 = [2]float64{42.0, 17.0}
+var ev32 [2]float32 = [2]float32{42.0, 17.0}
+
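+// cmpOpTest exercises one comparison operator in both expression and branch
+// form, for float64 and float32. The low five hex digits of result encode the
+// expected outcomes for the operand pairs (zero,zero), (zero,one), (zero,inf),
+// (zero,NaN) and (NaN,NaN), most significant digit first; a digit of 1 means
+// the comparison is expected to be true.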
+func cmpOpTest(t *testing.T,
+ s string,
+ f func(a, b float64) bool,
+ g func(a, b float64) float64,
+ ff func(a, b float32) bool,
+ gg func(a, b float32) float32,
+ zero, one, inf, nan float64, result uint) {
+ fail64bool(s, f, zero, zero, result>>16&1 == 1)
+ fail64bool(s, f, zero, one, result>>12&1 == 1)
+ fail64bool(s, f, zero, inf, result>>8&1 == 1)
+ fail64bool(s, f, zero, nan, result>>4&1 == 1)
+ fail64bool(s, f, nan, nan, result&1 == 1)
+
+ fail64(s, g, zero, zero, ev64[result>>16&1])
+ fail64(s, g, zero, one, ev64[result>>12&1])
+ fail64(s, g, zero, inf, ev64[result>>8&1])
+ fail64(s, g, zero, nan, ev64[result>>4&1])
+ fail64(s, g, nan, nan, ev64[result>>0&1])
+
+ {
+ zero := float32(zero)
+ one := float32(one)
+ inf := float32(inf)
+ nan := float32(nan)
+ fail32bool(s, ff, zero, zero, (result>>16)&1 == 1)
+ fail32bool(s, ff, zero, one, (result>>12)&1 == 1)
+ fail32bool(s, ff, zero, inf, (result>>8)&1 == 1)
+ fail32bool(s, ff, zero, nan, (result>>4)&1 == 1)
+ fail32bool(s, ff, nan, nan, result&1 == 1)
+
+ fail32(s, gg, zero, zero, ev32[(result>>16)&1])
+ fail32(s, gg, zero, one, ev32[(result>>12)&1])
+ fail32(s, gg, zero, inf, ev32[(result>>8)&1])
+ fail32(s, gg, zero, nan, ev32[(result>>4)&1])
+ fail32(s, gg, nan, nan, ev32[(result>>0)&1])
+ }
+}
+
+func expectCx128(t *testing.T, s string, x, expected complex128) {
+ if x != expected {
+ t.Errorf("Cx 128 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+func expectCx64(t *testing.T, s string, x, expected complex64) {
+ if x != expected {
+ t.Errorf("Cx 64 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+//go:noinline
+func cx128sum_ssa(a, b complex128) complex128 {
+ return a + b
+}
+
+//go:noinline
+func cx128diff_ssa(a, b complex128) complex128 {
+ return a - b
+}
+
+//go:noinline
+func cx128prod_ssa(a, b complex128) complex128 {
+ return a * b
+}
+
+//go:noinline
+func cx128quot_ssa(a, b complex128) complex128 {
+ return a / b
+}
+
+//go:noinline
+func cx128neg_ssa(a complex128) complex128 {
+ return -a
+}
+
+//go:noinline
+func cx128real_ssa(a complex128) float64 {
+ return real(a)
+}
+
+//go:noinline
+func cx128imag_ssa(a complex128) float64 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128cnst_ssa(a complex128) complex128 {
+ b := 2 + 3i
+ return a * b
+}
+
+//go:noinline
+func cx64sum_ssa(a, b complex64) complex64 {
+ return a + b
+}
+
+//go:noinline
+func cx64diff_ssa(a, b complex64) complex64 {
+ return a - b
+}
+
+//go:noinline
+func cx64prod_ssa(a, b complex64) complex64 {
+ return a * b
+}
+
+//go:noinline
+func cx64quot_ssa(a, b complex64) complex64 {
+ return a / b
+}
+
+//go:noinline
+func cx64neg_ssa(a complex64) complex64 {
+ return -a
+}
+
+//go:noinline
+func cx64real_ssa(a complex64) float32 {
+ return real(a)
+}
+
+//go:noinline
+func cx64imag_ssa(a complex64) float32 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128eq_ssa(a, b complex128) bool {
+ return a == b
+}
+
+//go:noinline
+func cx128ne_ssa(a, b complex128) bool {
+ return a != b
+}
+
+//go:noinline
+func cx64eq_ssa(a, b complex64) bool {
+ return a == b
+}
+
+//go:noinline
+func cx64ne_ssa(a, b complex64) bool {
+ return a != b
+}
+
+func expectTrue(t *testing.T, s string, b bool) {
+ if !b {
+ t.Errorf("expected true for %s, got false", s)
+ }
+}
+func expectFalse(t *testing.T, s string, b bool) {
+ if b {
+ t.Errorf("expected false for %s, got true", s)
+ }
+}
+
+func complexTest128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 6i
+ sum := cx128sum_ssa(b, a)
+ diff := cx128diff_ssa(b, a)
+ prod := cx128prod_ssa(b, a)
+ quot := cx128quot_ssa(b, a)
+ neg := cx128neg_ssa(a)
+ r := cx128real_ssa(a)
+ i := cx128imag_ssa(a)
+ cnst := cx128cnst_ssa(a)
+ c1 := cx128eq_ssa(a, a)
+ c2 := cx128eq_ssa(a, b)
+ c3 := cx128ne_ssa(a, a)
+ c4 := cx128ne_ssa(a, b)
+
+ expectCx128(t, "sum", sum, 4+8i)
+ expectCx128(t, "diff", diff, 2+4i)
+ expectCx128(t, "prod", prod, -9+12i)
+ expectCx128(t, "quot", quot, 3+0i)
+ expectCx128(t, "neg", neg, -1-2i)
+ expect64(t, "real", r, 1)
+ expect64(t, "imag", i, 2)
+ expectCx128(t, "cnst", cnst, -4+7i)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+func complexTest64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 6i
+ sum := cx64sum_ssa(b, a)
+ diff := cx64diff_ssa(b, a)
+ prod := cx64prod_ssa(b, a)
+ quot := cx64quot_ssa(b, a)
+ neg := cx64neg_ssa(a)
+ r := cx64real_ssa(a)
+ i := cx64imag_ssa(a)
+ c1 := cx64eq_ssa(a, a)
+ c2 := cx64eq_ssa(a, b)
+ c3 := cx64ne_ssa(a, a)
+ c4 := cx64ne_ssa(a, b)
+
+ expectCx64(t, "sum", sum, 4+8i)
+ expectCx64(t, "diff", diff, 2+4i)
+ expectCx64(t, "prod", prod, -9+12i)
+ expectCx64(t, "quot", quot, 3+0i)
+ expectCx64(t, "neg", neg, -1-2i)
+ expect32(t, "real", r, 1)
+ expect32(t, "imag", i, 2)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+// TestFP tests that we get the right answer for floating point expressions.
+func TestFP(t *testing.T) {
+ a := 3.0
+ b := 4.0
+
+ c := float32(3.0)
+ d := float32(4.0)
+
+ tiny := float32(1.5e-45) // smallest f32 denorm = 2**(-149)
+ dtiny := float64(tiny) // well within range of f64
+
+ fail64("+", add64_ssa, a, b, 7.0)
+ fail64("*", mul64_ssa, a, b, 12.0)
+ fail64("-", sub64_ssa, a, b, -1.0)
+ fail64("/", div64_ssa, a, b, 0.75)
+ fail64("neg", neg64_ssa, a, b, -7)
+
+ fail32("+", add32_ssa, c, d, 7.0)
+ fail32("*", mul32_ssa, c, d, 12.0)
+ fail32("-", sub32_ssa, c, d, -1.0)
+ fail32("/", div32_ssa, c, d, 0.75)
+ fail32("neg", neg32_ssa, c, d, -7)
+
+ // denorm-squared should underflow to zero.
+ fail32("*", mul32_ssa, tiny, tiny, 0)
+
+ // but should not underflow in float and in fact is exactly representable.
+ fail64("*", mul64_ssa, dtiny, dtiny, 1.9636373861190906e-90)
+
+ // Intended to create register pressure which forces
+ // asymmetric op into different code paths.
+ aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd := manysub_ssa(1000.0, 100.0, 10.0, 1.0)
+
+ expect64(t, "aa", aa, 11.0)
+ expect64(t, "ab", ab, 900.0)
+ expect64(t, "ac", ac, 990.0)
+ expect64(t, "ad", ad, 999.0)
+
+ expect64(t, "ba", ba, -900.0)
+ expect64(t, "bb", bb, 22.0)
+ expect64(t, "bc", bc, 90.0)
+ expect64(t, "bd", bd, 99.0)
+
+ expect64(t, "ca", ca, -990.0)
+ expect64(t, "cb", cb, -90.0)
+ expect64(t, "cc", cc, 33.0)
+ expect64(t, "cd", cd, 9.0)
+
+ expect64(t, "da", da, -999.0)
+ expect64(t, "db", db, -99.0)
+ expect64(t, "dc", dc, -9.0)
+ expect64(t, "dd", dd, 44.0)
+
+ integer2floatConversions(t)
+
+ multiplyAdd(t)
+
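+ // Build Inf and NaN at run time, via a non-inlined subtraction, so the
+ // comparisons below cannot be resolved at compile time.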
+ var zero64 float64 = 0.0
+ var one64 float64 = 1.0
+ var inf64 float64 = 1.0 / zero64
+ var nan64 float64 = sub64_ssa(inf64, inf64)
+
+ cmpOpTest(t, "!=", ne64_ssa, nebr64_ssa, ne32_ssa, nebr32_ssa, zero64, one64, inf64, nan64, 0x01111)
+ cmpOpTest(t, "==", eq64_ssa, eqbr64_ssa, eq32_ssa, eqbr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+ cmpOpTest(t, "<=", le64_ssa, lebr64_ssa, le32_ssa, lebr32_ssa, zero64, one64, inf64, nan64, 0x11100)
+ cmpOpTest(t, "<", lt64_ssa, ltbr64_ssa, lt32_ssa, ltbr32_ssa, zero64, one64, inf64, nan64, 0x01100)
+ cmpOpTest(t, ">", gt64_ssa, gtbr64_ssa, gt32_ssa, gtbr32_ssa, zero64, one64, inf64, nan64, 0x00000)
+ cmpOpTest(t, ">=", ge64_ssa, gebr64_ssa, ge32_ssa, gebr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+
+ {
+ lt, le, eq, ne, ge, gt := compares64_ssa(0.0, 1.0, inf64, nan64)
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ // fmt.Printf("lt=0x%016x, le=0x%016x, eq=0x%016x, ne=0x%016x, ge=0x%016x, gt=0x%016x\n",
+ // lt, le, eq, ne, ge, gt)
+ }
+ {
+ lt, le, eq, ne, ge, gt := compares32_ssa(0.0, 1.0, float32(inf64), float32(nan64))
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ }
+
+ floatingToIntegerConversionsTest(t)
+ complexTest128(t)
+ complexTest64(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
new file mode 100644
index 0000000..21ad27e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle some special cases. The test file should be
+// generated with a known working version of go.
+// Launch it with `go run arithBoundaryGen.go`; a file called
+// arithBoundary_test.go containing the tests will be written into the
+// parent directory.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "text/template"
+)
+
+// used for interpolation in a text template
+type tmplData struct {
+ Name, Stype, Symbol string
+}
+
+// SymFirst returns the first character of the symbol. The mod operator is
+// stored as "%%" so it survives the Errorf format strings in the generated
+// tests; the expression template needs just the single "%".
+func (s tmplData) SymFirst() string {
+ return string(s.Symbol[0])
+}
+
+// ucast casts an unsigned int to the size in s
+func ucast(i uint64, s sizedTestData) uint64 {
+ switch s.name {
+ case "uint32":
+ return uint64(uint32(i))
+ case "uint16":
+ return uint64(uint16(i))
+ case "uint8":
+ return uint64(uint8(i))
+ }
+ return i
+}
+
+// icast casts a signed int to the size in s
+func icast(i int64, s sizedTestData) int64 {
+ switch s.name {
+ case "int32":
+ return int64(int32(i))
+ case "int16":
+ return int64(int16(i))
+ case "int8":
+ return int64(int8(i))
+ }
+ return i
+}
+
+type sizedTestData struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+// Values used to generate tests. These should include the smallest and largest
+// values for each size, along with any other values that might cause issues.
+// We generate n^2 tests for each size to cover every pair.
+var szs = []sizedTestData{
+ sizedTestData{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ sizedTestData{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ sizedTestData{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ sizedTestData{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ sizedTestData{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ sizedTestData{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ sizedTestData{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ sizedTestData{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+type op struct {
+ name, symbol string
+}
+
+// ops that we will be generating tests for
+var ops = []op{op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mod", "%%"}, op{"mul", "*"}}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, sz := range []int{64, 32, 16, 8} {
+ fmt.Fprintf(w, "type utd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b uint%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod uint%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type itd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b int%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod int%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // the function being tested
+ testFunc, err := template.New("testFunc").Parse(
+ `//go:noinline
+ func {{.Name}}_{{.Stype}}_ssa(a, b {{.Stype}}) {{.Stype}} {
+ return a {{.SymFirst}} b
+}
+`)
+ if err != nil {
+ panic(err)
+ }
+
+ // generate our functions to be tested
+ for _, s := range szs {
+ for _, o := range ops {
+ fd := tmplData{o.name, s.name, o.symbol}
+ err = testFunc.Execute(w, fd)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // generate the test data
+ for _, s := range szs {
+ if len(s.u) > 0 {
+ fmt.Fprintf(w, "var %s_data []utd%s = []utd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.u {
+ for _, j := range s.u {
+ fmt.Fprintf(w, "utd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, ucast(i+j, s), ucast(i-j, s), ucast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", ucast(i/j, s), ucast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ } else {
+ // TODO: clean up this duplication
+ fmt.Fprintf(w, "var %s_data []itd%s = []itd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.i {
+ for _, j := range s.i {
+ fmt.Fprintf(w, "itd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, icast(i+j, s), icast(i-j, s), icast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", icast(i/j, s), icast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ fmt.Fprintf(w, "// TestArithmeticBoundary tests boundary results for arithmetic operations.\n")
+ fmt.Fprintf(w, "func TestArithmeticBoundary(t *testing.T) {\n\n")
+
+ verify, err := template.New("tst").Parse(
+ `if got := {{.Name}}_{{.Stype}}_ssa(v.a, v.b); got != v.{{.Name}} {
+ t.Errorf("{{.Name}}_{{.Stype}} %d{{.Symbol}}%d = %d, wanted %d\n",v.a,v.b,got,v.{{.Name}})
+}
+`)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, s := range szs {
+ fmt.Fprintf(w, "for _, v := range %s_data {\n", s.name)
+
+ for _, o := range ops {
+ // avoid generating tests that divide by zero
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "if v.b != 0 {")
+ }
+
+ err = verify.Execute(w, tmplData{o.name, s.name, o.symbol})
+
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "\n}\n")
+ }
+
+ if err != nil {
+ panic(err)
+ }
+
+ }
+ fmt.Fprint(w, " }\n")
+ }
+
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../arithBoundary_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
new file mode 100644
index 0000000..41b2946
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
@@ -0,0 +1,346 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle const cases. The test file should be
+// generated with a known working version of Go.
+// Launch it with `go run arithConstGen.go`; a file called arithConst_test.go
+// will be written into the parent directory containing the tests.
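+//
+// For reference, the generated file contains pairs of //go:noinline functions
+// with the constant on either side of the operator, plus tables of expected
+// results (negative constants appear as "Neg" in the function names). The
+// output is shaped roughly like this (illustrative excerpt only):
+//
+//	//go:noinline
+//	func add_uint64_1(a uint64) uint64 { return a + 1 }
+//
+//	//go:noinline
+//	func add_1_uint64(a uint64) uint64 { return 1 + a }
+//
+//	var tests_uint64 = []test_uint64{
+//		test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 0, want: 1},
+//		// ...
+//	}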
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "strings"
+ "text/template"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+ oponly string
+}
+
+var szs = []szD{
+ {name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0x8000000000000000, 0xffffFFFFffffFFFF}},
+ {name: "uint64", sn: "64", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+ {name: "int64", sn: "64", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ {name: "uint32", sn: "32", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+ {name: "int32", sn: "32", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ {name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ {name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ {name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ {"add", "+"},
+ {"sub", "-"},
+ {"div", "/"},
+ {"mul", "*"},
+ {"lsh", "<<"},
+ {"rsh", ">>"},
+ {"mod", "%"},
+ {"and", "&"},
+ {"or", "|"},
+ {"xor", "^"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
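+// For example, ansU(0, 1, "uint64", "-") is "18446744073709551615" and
+// ansU(1, 4294967296, "uint64", "<<") is "0"; the generated tests expect these
+// wrapped-around results from the compiled code.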
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ fncCnst1 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.Type_}}_{{.FNumber}}(a {{.Type_}}) {{.Type_}} { return a {{.Symbol}} {{.Number}} }
+`))
+ fncCnst2 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.FNumber}}_{{.Type_}}(a {{.Type_}}) {{.Type_}} { return {{.Number}} {{.Symbol}} a }
+`))
+
+ type fncData struct {
+ Name, Type_, Symbol, FNumber, Number string
+ }
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := fncData{o.name, s.name, o.symbol, "", ""}
+
+ // unsigned test cases
+ if len(s.u) > 0 {
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ // introduce uint64 cast for rhs shift operands
+ // if they are too large for default uint type
+ number := fd.Number
+ if (o.name == "lsh" || o.name == "rsh") && uint64(uint32(i)) != i {
+ fd.Number = fmt.Sprintf("uint64(%s)", number)
+ }
+ fncCnst1.Execute(w, fd)
+ fd.Number = number
+ }
+
+ fncCnst2.Execute(w, fd)
+ }
+ }
+
+ // signed test cases
+ if len(s.i) > 0 {
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fncCnst1.Execute(w, fd)
+ }
+ fncCnst2.Execute(w, fd)
+ }
+ }
+ }
+ }
+
+ vrf1 := template.Must(template.New("vrf1").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.FNumber}}_{{.Type_}}, fnname: "{{.Name}}_{{.FNumber}}_{{.Type_}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ vrf2 := template.Must(template.New("vrf2").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.Type_}}_{{.FNumber}}, fnname: "{{.Name}}_{{.Type_}}_{{.FNumber}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ type cfncData struct {
+ Size, Name, Type_, Symbol, FNumber, Number string
+ Ans, Input string
+ }
+ for _, s := range szs {
+ fmt.Fprintf(w, `
+type test_%[1]s%[2]s struct {
+ fn func (%[1]s) %[1]s
+ fnname string
+ in %[1]s
+ want %[1]s
+}
+`, s.name, s.oponly)
+ fmt.Fprintf(w, "var tests_%[1]s%[2]s =[]test_%[1]s {\n\n", s.name, s.oponly)
+
+ if len(s.u) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // unsigned
+ for _, j := range s.u {
+
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansU(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansU(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ // signed
+ if len(s.i) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+ for _, j := range s.i {
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansS(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansS(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ fmt.Fprintf(w, "}\n\n")
+ }
+
+ fmt.Fprint(w, `
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+`)
+
+ for _, s := range szs {
+ fmt.Fprintf(w, `for _, test := range tests_%s%s {`, s.name, s.oponly)
+ // Use WriteString here to avoid a vet warning about formatting directives.
+ w.WriteString(`if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+`)
+ }
+
+ fmt.Fprint(w, `
+}
+`)
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../arithConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go b/src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go
new file mode 100644
index 0000000..5508e76
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/cmpConstGen.go
@@ -0,0 +1,247 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard comparison
+// operators properly handle one const operand. The test file should be
+// generated with a known working version of Go.
+// Launch it with `go run cmpConstGen.go`; a file called cmpConst_test.go
+// will be written into the parent directory containing the tests.
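+//
+// For reference, the generated file contains one tiny comparison function per
+// (constant, operator) pair and, per type, a table recording the constant's
+// index within the <type>_vals slice together with the expected result triple.
+// The output is shaped roughly like this (illustrative excerpt only):
+//
+//	func lt_1_uint8(x uint8) bool { return x < 1 }
+//
+//	var uint8_tests = []struct {
+//		idx int    // index of the constant used
+//		exp result // expected results
+//		fn  func(uint8) bool
+//	}{
+//		{idx: 1, exp: lt, fn: lt_1_uint8},
+//		// ...
+//	}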
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "math/big"
+ "sort"
+)
+
+const (
+ maxU64 = (1 << 64) - 1
+ maxU32 = (1 << 32) - 1
+ maxU16 = (1 << 16) - 1
+ maxU8 = (1 << 8) - 1
+
+ maxI64 = (1 << 63) - 1
+ maxI32 = (1 << 31) - 1
+ maxI16 = (1 << 15) - 1
+ maxI8 = (1 << 7) - 1
+
+ minI64 = -(1 << 63)
+ minI32 = -(1 << 31)
+ minI16 = -(1 << 15)
+ minI8 = -(1 << 7)
+)
+
+func cmp(left *big.Int, op string, right *big.Int) bool {
+ switch left.Cmp(right) {
+ case -1: // less than
+ return op == "<" || op == "<=" || op == "!="
+ case 0: // equal
+ return op == "==" || op == "<=" || op == ">="
+ case 1: // greater than
+ return op == ">" || op == ">=" || op == "!="
+ }
+ panic("unexpected comparison value")
+}
+
+func inRange(typ string, val *big.Int) bool {
+ min, max := &big.Int{}, &big.Int{}
+ switch typ {
+ case "uint64":
+ max = max.SetUint64(maxU64)
+ case "uint32":
+ max = max.SetUint64(maxU32)
+ case "uint16":
+ max = max.SetUint64(maxU16)
+ case "uint8":
+ max = max.SetUint64(maxU8)
+ case "int64":
+ min = min.SetInt64(minI64)
+ max = max.SetInt64(maxI64)
+ case "int32":
+ min = min.SetInt64(minI32)
+ max = max.SetInt64(maxI32)
+ case "int16":
+ min = min.SetInt64(minI16)
+ max = max.SetInt64(maxI16)
+ case "int8":
+ min = min.SetInt64(minI8)
+ max = max.SetInt64(maxI8)
+ default:
+ panic("unexpected type")
+ }
+ return cmp(min, "<=", val) && cmp(val, "<=", max)
+}
+
+func getValues(typ string) []*big.Int {
+ Uint := func(v uint64) *big.Int { return big.NewInt(0).SetUint64(v) }
+ Int := func(v int64) *big.Int { return big.NewInt(0).SetInt64(v) }
+ values := []*big.Int{
+ // limits
+ Uint(maxU64),
+ Uint(maxU64 - 1),
+ Uint(maxI64 + 1),
+ Uint(maxI64),
+ Uint(maxI64 - 1),
+ Uint(maxU32 + 1),
+ Uint(maxU32),
+ Uint(maxU32 - 1),
+ Uint(maxI32 + 1),
+ Uint(maxI32),
+ Uint(maxI32 - 1),
+ Uint(maxU16 + 1),
+ Uint(maxU16),
+ Uint(maxU16 - 1),
+ Uint(maxI16 + 1),
+ Uint(maxI16),
+ Uint(maxI16 - 1),
+ Uint(maxU8 + 1),
+ Uint(maxU8),
+ Uint(maxU8 - 1),
+ Uint(maxI8 + 1),
+ Uint(maxI8),
+ Uint(maxI8 - 1),
+ Uint(0),
+ Int(minI8 + 1),
+ Int(minI8),
+ Int(minI8 - 1),
+ Int(minI16 + 1),
+ Int(minI16),
+ Int(minI16 - 1),
+ Int(minI32 + 1),
+ Int(minI32),
+ Int(minI32 - 1),
+ Int(minI64 + 1),
+ Int(minI64),
+
+ // other possibly interesting values
+ Uint(1),
+ Int(-1),
+ Uint(0xff << 56),
+ Uint(0xff << 32),
+ Uint(0xff << 24),
+ }
+ sort.Slice(values, func(i, j int) bool { return values[i].Cmp(values[j]) == -1 })
+ var ret []*big.Int
+ for _, val := range values {
+ if !inRange(typ, val) {
+ continue
+ }
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func sigString(v *big.Int) string {
+ var t big.Int
+ t.Abs(v)
+ if v.Sign() == -1 {
+ return "neg" + t.String()
+ }
+ return t.String()
+}
+
+func main() {
+ types := []string{
+ "uint64", "uint32", "uint16", "uint8",
+ "int64", "int32", "int16", "int8",
+ }
+
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/cmpConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import (\"testing\"; \"reflect\"; \"runtime\";)\n")
+ fmt.Fprintf(w, "// results show the expected result for the elements left of, equal to and right of the index.\n")
+ fmt.Fprintf(w, "type result struct{l, e, r bool}\n")
+ fmt.Fprintf(w, "var (\n")
+ fmt.Fprintf(w, " eq = result{l: false, e: true, r: false}\n")
+ fmt.Fprintf(w, " ne = result{l: true, e: false, r: true}\n")
+ fmt.Fprintf(w, " lt = result{l: true, e: false, r: false}\n")
+ fmt.Fprintf(w, " le = result{l: true, e: true, r: false}\n")
+ fmt.Fprintf(w, " gt = result{l: false, e: false, r: true}\n")
+ fmt.Fprintf(w, " ge = result{l: false, e: true, r: true}\n")
+ fmt.Fprintf(w, ")\n")
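+ // For example, with op "<": the values to the left of the constant's index in
+ // <typ>_vals (i.e. the smaller ones) satisfy x < c, while the equal and larger
+ // values do not, which is why lt above is {l: true, e: false, r: false}.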
+
+ operators := []struct{ op, name string }{
+ {"<", "lt"},
+ {"<=", "le"},
+ {">", "gt"},
+ {">=", "ge"},
+ {"==", "eq"},
+ {"!=", "ne"},
+ }
+
+ for _, typ := range types {
+ // generate a slice containing valid values for this type
+ fmt.Fprintf(w, "\n// %v tests\n", typ)
+ values := getValues(typ)
+ fmt.Fprintf(w, "var %v_vals = []%v{\n", typ, typ)
+ for _, val := range values {
+ fmt.Fprintf(w, "%v,\n", val.String())
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // generate test functions
+ for _, r := range values {
+ // TODO: could also test constant on lhs.
+ sig := sigString(r)
+ for _, op := range operators {
+ // no need for go:noinline because the function is called indirectly
+ fmt.Fprintf(w, "func %v_%v_%v(x %v) bool { return x %v %v; }\n", op.name, sig, typ, typ, op.op, r.String())
+ }
+ }
+
+ // generate a table of test cases
+ fmt.Fprintf(w, "var %v_tests = []struct{\n", typ)
+ fmt.Fprintf(w, " idx int // index of the constant used\n")
+ fmt.Fprintf(w, " exp result // expected results\n")
+ fmt.Fprintf(w, " fn func(%v) bool\n", typ)
+ fmt.Fprintf(w, "}{\n")
+ for i, r := range values {
+ sig := sigString(r)
+ for _, op := range operators {
+ fmt.Fprintf(w, "{idx: %v,", i)
+ fmt.Fprintf(w, "exp: %v,", op.name)
+ fmt.Fprintf(w, "fn: %v_%v_%v},\n", op.name, sig, typ)
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // emit the main function, looping over all test cases
+ fmt.Fprintf(w, "// TestComparisonsConst tests results for comparison operations against constants.\n")
+ fmt.Fprintf(w, "func TestComparisonsConst(t *testing.T) {\n")
+ for _, typ := range types {
+ fmt.Fprintf(w, "for i, test := range %v_tests {\n", typ)
+ fmt.Fprintf(w, " for j, x := range %v_vals {\n", typ)
+ fmt.Fprintf(w, " want := test.exp.l\n")
+ fmt.Fprintf(w, " if j == test.idx {\nwant = test.exp.e\n}")
+ fmt.Fprintf(w, " else if j > test.idx {\nwant = test.exp.r\n}\n")
+ fmt.Fprintf(w, " if test.fn(x) != want {\n")
+ fmt.Fprintf(w, " fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()\n")
+ fmt.Fprintf(w, " t.Errorf(\"test failed: %%v(%%v) != %%v [type=%v i=%%v j=%%v idx=%%v]\", fn, x, want, i, j, test.idx)\n", typ)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../cmpConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go b/src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go
new file mode 100644
index 0000000..2b8a331
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/constFoldGen.go
@@ -0,0 +1,307 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle constant folding. The test file should be
+// generated with a known working version of Go.
+// Launch it with `go run constFoldGen.go`; a file called constFold_test.go
+// will be written into the grandparent directory containing the tests.
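+//
+// For reference, each generated test covers one (size, operator) pair and checks
+// every pair of interesting constants, shaped roughly like this (illustrative
+// excerpt only):
+//
+//	func TestConstFolduint64add(t *testing.T) {
+//		var x, y, r uint64
+//		x = 0
+//		y = 1
+//		r = x + y
+//		if r != 1 {
+//			t.Errorf("0 %s 1 = %d, want 1", "+", r)
+//		}
+//		// ...
+//	}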
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+var szs []szD = []szD{
+ szD{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ szD{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ szD{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ szD{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ szD{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ szD{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ szD{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ szD{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mul", "*"},
+ op{"lsh", "<<"}, op{"rsh", ">>"}, op{"mod", "%"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// run\n")
+ fmt.Fprintf(w, "// Code generated by gen/constFoldGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package gc\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if o.symbol == "<<" || o.symbol == ">>" {
+ // shifts handled separately below, as they can have
+ // different types on the LHS and RHS.
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s(t *testing.T) {\n", s.name, o.name)
+ fmt.Fprintf(w, "\tvar x, y, r %s\n", s.name)
+ // unsigned test cases
+ for _, c := range s.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.u {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed test cases
+ for _, c := range s.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.i {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ // Special signed/unsigned cases for shifts
+ for _, ls := range szs {
+ for _, rs := range szs {
+ if rs.name[0] != 'u' {
+ continue
+ }
+ for _, o := range ops {
+ if o.symbol != "<<" && o.symbol != ">>" {
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s%s(t *testing.T) {\n", ls.name, rs.name, o.name)
+ fmt.Fprintf(w, "\tvar x, r %s\n", ls.name)
+ fmt.Fprintf(w, "\tvar y %s\n", rs.name)
+ // unsigned LHS
+ for _, c := range ls.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed LHS
+ for _, c := range ls.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, int64(d), ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+ }
+
+ // Constant folding for comparisons
+ for _, s := range szs {
+ fmt.Fprintf(w, "func TestConstFoldCompare%s(t *testing.T) {\n", s.name)
+ for _, x := range s.i {
+ for _, y := range s.i {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ for _, x := range s.u {
+ for _, y := range s.u {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../../constFold_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/copyGen.go b/src/cmd/compile/internal/gc/testdata/gen/copyGen.go
new file mode 100644
index 0000000..4567f2f
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/copyGen.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+// This program generates tests to verify that copying operations
+// copy the data they are supposed to and clobber no adjacent values.
+
+// Run it as `go run copyGen.go`; a file called copy_test.go
+// will be written into the parent directory containing the tests.
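+//
+// For reference, the generated file contains, for each size, a struct type with
+// guard bytes around the copied field, a //go:noinline copy function, and a test
+// that checks both the copied data and the guard bytes. The output is shaped
+// roughly like this (illustrative excerpt only):
+//
+//	type T1 struct {
+//		pre  [8]byte
+//		mid  [1]byte
+//		post [8]byte
+//	}
+//
+//	//go:noinline
+//	func t1copy_ssa(y, x *[1]byte) {
+//		*y = *x
+//	}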
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025, 1024 + 7, 1024 + 8, 1024 + 9, 1024 + 15, 1024 + 16, 1024 + 17}
+
+var usizes = [...]int{2, 3, 4, 5, 6, 7}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/copyGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type T%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func t%dcopy_ssa(y, x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *y = *x\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " x := [%d]byte{", s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " t%dcopy_ssa(&a.mid, &x)\n", s)
+ fmt.Fprintf(w, " want := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"t%dcopy got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func tu%dcopy_ssa(docopy bool, data [%d]byte, x *[%d]byte) {\n", s, s, s)
+ fmt.Fprintf(w, " if docopy {\n")
+ fmt.Fprintf(w, " *x = data\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testUnalignedCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " var a [%d]byte\n", s)
+ fmt.Fprintf(w, " t%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " tu%dcopy_ssa(true, t%d, &a)\n", s, s)
+ fmt.Fprintf(w, " want%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " if a != want%d {\n", s)
+ fmt.Fprintf(w, " t.Errorf(\"tu%dcopy got=%%v, want %%v\\n\", a, want%d)\n", s, s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestCopy(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testCopy%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testUnalignedCopy%d(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../copy_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go
new file mode 100644
index 0000000..7056730
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+// This program generates tests to verify that zeroing operations
+// zero the data they are supposed to and clobber no adjacent values.
+
+// Run it as `go run zeroGen.go`; a file called zero_test.go
+// will be written into the parent directory containing the tests.
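+//
+// For reference, the generated file surrounds each zeroed region with guard
+// bytes and checks that only the region itself is cleared. The output is shaped
+// roughly like this (illustrative excerpt only):
+//
+//	type Z1 struct {
+//		pre  [8]byte
+//		mid  [1]byte
+//		post [8]byte
+//	}
+//
+//	//go:noinline
+//	func zero1_ssa(x *[1]byte) {
+//		*x = [1]byte{}
+//	}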
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025}
+var usizes = [...]int{8, 16, 24, 32, 64, 256}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/zeroGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%d_ssa(x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *x = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " zero%d_ssa(&a.mid)\n", s)
+ fmt.Fprintf(w, " want := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%d got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%du1 struct {\n", s)
+ fmt.Fprintf(w, " b bool\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type Z%du2 struct {\n", s)
+ fmt.Fprintf(w, " i uint16\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du1_ssa(t *Z%du1) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du2_ssa(t *Z%du2) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%du(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s)
+ fmt.Fprintf(w, " want := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%du1 got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " b := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s)
+ fmt.Fprintf(w, " wantb := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if b != wantb {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestZero(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testZero%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testZero%du(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../zero_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/loadstore_test.go b/src/cmd/compile/internal/gc/testdata/loadstore_test.go
new file mode 100644
index 0000000..57571f5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/loadstore_test.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests load/store ordering
+
+package main
+
+import "testing"
+
+// testLoadStoreOrder tests for reordering of stores/loads.
+func testLoadStoreOrder(t *testing.T) {
+ z := uint32(1000)
+ if testLoadStoreOrder_ssa(&z, 100) == 0 {
+ t.Errorf("testLoadStoreOrder failed")
+ }
+}
+
+//go:noinline
+func testLoadStoreOrder_ssa(z *uint32, prec uint) int {
+ old := *z // load
+ *z = uint32(prec) // store
+ if *z < old { // load
+ return 1
+ }
+ return 0
+}
+
+func testStoreSize(t *testing.T) {
+ a := [4]uint16{11, 22, 33, 44}
+ testStoreSize_ssa(&a[0], &a[2], 77)
+ want := [4]uint16{77, 22, 33, 44}
+ if a != want {
+ t.Errorf("testStoreSize failed. want = %d, got = %d", want, a)
+ }
+}
+
+//go:noinline
+func testStoreSize_ssa(p *uint16, q *uint16, v uint32) {
+ // Test to make sure that (Store ptr (Trunc32to16 val) mem)
+ // does not end up as a 32-bit store. It must stay a 16 bit store
+ // even when Trunc32to16 is rewritten to be a nop.
+ // To ensure that the Trunc32to16 is rewritten before
+ // the Store is, we force the truncate into an
+ // earlier basic block by using it on both branches.
+ w := uint16(v)
+ if p != nil {
+ *p = w
+ } else {
+ *q = w
+ }
+}
+
+//go:noinline
+func testExtStore_ssa(p *byte, b bool) int {
+ x := *p
+ *p = 7
+ if b {
+ return int(x)
+ }
+ return 0
+}
+
+func testExtStore(t *testing.T) {
+ const start = 8
+ var b byte = start
+ if got := testExtStore_ssa(&b, true); got != start {
+ t.Errorf("testExtStore failed. want = %d, got = %d", start, got)
+ }
+}
+
+var b int
+
+// testDeadStorePanic_ssa ensures that we don't optimize away stores
+// that could be read after recover(). Modeled after fixedbugs/issue1304.
+//go:noinline
+func testDeadStorePanic_ssa(a int) (r int) {
+ defer func() {
+ recover()
+ r = a
+ }()
+ a = 2 // store
+ b := a - a // optimized to zero
+ c := 4
+ a = c / b // store, but panics
+ a = 3 // store
+ r = a
+ return
+}
+
+func testDeadStorePanic(t *testing.T) {
+ if want, got := 2, testDeadStorePanic_ssa(1); want != got {
+ t.Errorf("testDeadStorePanic failed. want = %d, got = %d", want, got)
+ }
+}
+
+//go:noinline
+func loadHitStore8(x int8, p *int8) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU8(x uint8, p *uint8) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore16(x int16, p *int16) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU16(x uint16, p *uint16) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore32(x int32, p *int32) int64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int64(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU32(x uint32, p *uint32) uint64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint64(*p) // load and cast
+}
+
+func testLoadHitStore(t *testing.T) {
+ // Test that sign/zero extensions are kept when a load-hit-store
+ // is replaced by a register-register move.
+ {
+ var in int8 = (1 << 6) + 1
+ var p int8
+ got := loadHitStore8(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint8 = (1 << 6) + 1
+ var p uint8
+ got := loadHitStoreU8(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int16 = (1 << 10) + 1
+ var p int16
+ got := loadHitStore16(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint16 = (1 << 10) + 1
+ var p uint16
+ got := loadHitStoreU16(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int32 = (1 << 30) + 1
+ var p int32
+ got := loadHitStore32(in, &p)
+ want := int64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int32) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint32 = (1 << 30) + 1
+ var p uint32
+ got := loadHitStoreU32(in, &p)
+ want := uint64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint32) failed. want = %d, got = %d", want, got)
+ }
+ }
+}
+
+func TestLoadStore(t *testing.T) {
+ testLoadStoreOrder(t)
+ testStoreSize(t)
+ testExtStore(t)
+ testDeadStorePanic(t)
+ testLoadHitStore(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/map_test.go b/src/cmd/compile/internal/gc/testdata/map_test.go
new file mode 100644
index 0000000..71dc820
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/map_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// map_test.go tests map operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenMap_ssa(v map[int]int) int {
+ return len(v)
+}
+
+func testLenMap(t *testing.T) {
+
+ v := make(map[int]int)
+ v[0] = 0
+ v[1] = 0
+ v[2] = 0
+
+ if want, got := 3, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(map) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilMap(t *testing.T) {
+
+ var v map[int]int
+ if want, got := 0, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+func TestMap(t *testing.T) {
+ testLenMap(t)
+ testLenNilMap(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/namedReturn_test.go b/src/cmd/compile/internal/gc/testdata/namedReturn_test.go
new file mode 100644
index 0000000..b07e225
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/namedReturn_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that explicitly returning a named
+// result variable (e.g. `return t`) works.
+// See issue #14904.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+// Our heap-allocated object that could be GC'd incorrectly
+// if named result values were mishandled.
+// Note that we always check the second word because that's
+// where 0xdeaddeaddeaddead is written.
+type B [4]int
+
+// small (SSAable) array
+type A1 [3]*B
+
+//go:noinline
+func f1() (t A1) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) array
+type A2 [8]*B
+
+//go:noinline
+func f2() (t A2) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// small (SSAable) struct
+type A3 struct {
+ a, b, c *B
+}
+
+//go:noinline
+func f3() (t A3) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) struct
+type A4 struct {
+ a, b, c, d, e, f *B
+}
+
+//go:noinline
+func f4() (t A4) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+var sink *B
+
+func f5() int {
+ b := &B{91, 92, 93, 94}
+ t := A4{b, nil, nil, nil, nil, nil}
+ sink = b // make sure b is heap allocated ...
+ sink = nil // ... but not live
+ runtime.GC()
+ t = t
+ return t.a[1]
+}
+
+func TestNamedReturn(t *testing.T) {
+ if v := f1()[0][1]; v != 92 {
+ t.Errorf("f1()[0][1]=%d, want 92\n", v)
+ }
+ if v := f2()[0][1]; v != 92 {
+ t.Errorf("f2()[0][1]=%d, want 92\n", v)
+ }
+ if v := f3().a[1]; v != 92 {
+ t.Errorf("f3().a[1]=%d, want 92\n", v)
+ }
+ if v := f4().a[1]; v != 92 {
+ t.Errorf("f4().a[1]=%d, want 92\n", v)
+ }
+ if v := f5(); v != 92 {
+ t.Errorf("f5()=%d, want 92\n", v)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/phi_test.go b/src/cmd/compile/internal/gc/testdata/phi_test.go
new file mode 100644
index 0000000..c8a73ff
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/phi_test.go
@@ -0,0 +1,99 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test to make sure that cast-shortened values are spilled
+// at their post-shortened size rather than their
+// pre-shortened size.
+
+import (
+ "runtime"
+ "testing"
+)
+
+var data1 [26]int32
+var data2 [26]int64
+
+func init() {
+ for i := 0; i < 26; i++ {
+ // If we spill all 8 bytes of this datum, the 1 in the high-order 4 bytes
+ // will overwrite some other variable in the stack frame.
+ data2[i] = 0x100000000
+ }
+}
+
+func foo() int32 {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int32
+ if always {
+ a = data1[0]
+ b = data1[1]
+ c = data1[2]
+ d = data1[3]
+ e = data1[4]
+ f = data1[5]
+ g = data1[6]
+ h = data1[7]
+ i = data1[8]
+ j = data1[9]
+ k = data1[10]
+ l = data1[11]
+ m = data1[12]
+ n = data1[13]
+ o = data1[14]
+ p = data1[15]
+ q = data1[16]
+ r = data1[17]
+ s = data1[18]
+ t = data1[19]
+ u = data1[20]
+ v = data1[21]
+ w = data1[22]
+ x = data1[23]
+ y = data1[24]
+ z = data1[25]
+ } else {
+ a = int32(data2[0])
+ b = int32(data2[1])
+ c = int32(data2[2])
+ d = int32(data2[3])
+ e = int32(data2[4])
+ f = int32(data2[5])
+ g = int32(data2[6])
+ h = int32(data2[7])
+ i = int32(data2[8])
+ j = int32(data2[9])
+ k = int32(data2[10])
+ l = int32(data2[11])
+ m = int32(data2[12])
+ n = int32(data2[13])
+ o = int32(data2[14])
+ p = int32(data2[15])
+ q = int32(data2[16])
+ r = int32(data2[17])
+ s = int32(data2[18])
+ t = int32(data2[19])
+ u = int32(data2[20])
+ v = int32(data2[21])
+ w = int32(data2[22])
+ x = int32(data2[23])
+ y = int32(data2[24])
+ z = int32(data2[25])
+ }
+ // Lots of phis of the form phi(int32,int64) of type int32 happen here.
+ // Some will be stack phis. For those stack phis, make sure the spill
+ // of the second argument uses the phi's width (4 bytes), not the
+ // argument's width (8 bytes). Otherwise, a random stack slot gets clobbered.
+
+ runtime.Gosched()
+ return a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r + s + t + u + v + w + x + y + z
+}
+
+func TestPhi(t *testing.T) {
+ want := int32(0)
+ got := foo()
+ if got != want {
+ t.Fatalf("want %d, got %d\n", want, got)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/regalloc_test.go b/src/cmd/compile/internal/gc/testdata/regalloc_test.go
new file mode 100644
index 0000000..577f8e7
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/regalloc_test.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests phi implementation
+
+package main
+
+import "testing"
+
+func phiOverwrite_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func phiOverwrite(t *testing.T) {
+ want := 5
+ got := phiOverwrite_ssa()
+ if got != want {
+ t.Errorf("phiOverwrite_ssa() = %d, want %d", got, want)
+ }
+}
+
+func phiOverwriteBig_ssa() int {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int
+ a = 1
+ for idx := 0; idx < 26; idx++ {
+ a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a
+ }
+ return a*1 + b*2 + c*3 + d*4 + e*5 + f*6 + g*7 + h*8 + i*9 + j*10 + k*11 + l*12 + m*13 + n*14 + o*15 + p*16 + q*17 + r*18 + s*19 + t*20 + u*21 + v*22 + w*23 + x*24 + y*25 + z*26
+}
+
+func phiOverwriteBig(t *testing.T) {
+ want := 1
+ got := phiOverwriteBig_ssa()
+ if got != want {
+ t.Errorf("phiOverwriteBig_ssa() = %d, want %d", got, want)
+ }
+}
+
+func TestRegalloc(t *testing.T) {
+ phiOverwrite(t)
+ phiOverwriteBig(t)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go b/src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go
new file mode 100644
index 0000000..3db0b8a
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/reproducible/issue20272.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var (
+ i0 uint8
+ b0 byte
+
+ i1 *uint8
+ b1 *byte
+
+ i2 **uint8
+ b2 **byte
+
+ i3 ***uint8
+ b3 ***byte
+
+ i4 ****uint8
+ b4 ****byte
+
+ i5 *****uint8
+ b5 *****byte
+
+ i6 ******uint8
+ b6 ******byte
+
+ i7 *******uint8
+ b7 *******byte
+
+ i8 ********uint8
+ b8 ********byte
+)
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go b/src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go
new file mode 100644
index 0000000..817f4a6
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/reproducible/issue27013.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(arg interface{}) {
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+}
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go b/src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go
new file mode 100644
index 0000000..7b5de2c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/reproducible/issue30202.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(x interface {
+ X() int
+}) int {
+ return x.X()
+}
+
+func B(x interface {
+ X() int
+}) int {
+ return x.X()
+}
diff --git a/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go
new file mode 100644
index 0000000..db5ca7d
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/reproducible/issue38068.go
@@ -0,0 +1,70 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue38068
+
+// A type with a couple of inlinable, non-pointer-receiver methods
+// that have params and local variables.
+type A struct {
+ s string
+ next *A
+ prev *A
+}
+
+// Inlinable, value-received method with locals and params.
+func (a A) double(x string, y int) string {
+ if y == 191 {
+ a.s = ""
+ }
+ q := a.s + "a"
+ r := a.s + "b"
+ return q + r
+}
+
+// Inlinable, value-received method with locals and params.
+func (a A) triple(x string, y int) string {
+ q := a.s
+ if y == 998877 {
+ a.s = x
+ }
+ r := a.s + a.s
+ return q + r
+}
+
+type methods struct {
+ m1 func(a *A, x string, y int) string
+ m2 func(a *A, x string, y int) string
+}
+
+// Now a function that makes references to the methods via pointers,
+// which should trigger the wrapper generation.
+func P(a *A, ms *methods) {
+ if a != nil {
+ defer func() { println("done") }()
+ }
+ println(ms.m1(a, "a", 2))
+ println(ms.m2(a, "b", 3))
+}
+
+func G(x *A, n int) {
+ if n <= 0 {
+ println(n)
+ return
+ }
+ // Address-taken local of type A, which will ensure that the
+ // compiler's dtypesym() routine will create a method wrapper.
+ var a, b A
+ a.next = x
+ a.prev = &b
+ x = &a
+ G(x, n-2)
+}
+
+var M methods
+
+func F() {
+ M.m1 = (*A).double
+ M.m2 = (*A).triple
+ G(nil, 100)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/short_test.go b/src/cmd/compile/internal/gc/testdata/short_test.go
new file mode 100644
index 0000000..7a743b5
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/short_test.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests short circuiting.
+
+package main
+
+import "testing"
+
+func and_ssa(arg1, arg2 bool) bool {
+ return arg1 && rightCall(arg2)
+}
+
+func or_ssa(arg1, arg2 bool) bool {
+ return arg1 || rightCall(arg2)
+}
+
+var rightCalled bool
+
+//go:noinline
+func rightCall(v bool) bool {
+ rightCalled = true
+ return v
+ panic("unreached")
+}
+
+func testAnd(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "AND", arg1, arg2, and_ssa, arg1, wantRes)
+}
+func testOr(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "OR", arg1, arg2, or_ssa, !arg1, wantRes)
+}
+
+func testShortCircuit(t *testing.T, opName string, arg1, arg2 bool, fn func(bool, bool) bool, wantRightCall, wantRes bool) {
+ rightCalled = false
+ got := fn(arg1, arg2)
+ if rightCalled != wantRightCall {
+ t.Errorf("failed for %t %s %t; rightCalled=%t want=%t", arg1, opName, arg2, rightCalled, wantRightCall)
+ }
+ if wantRes != got {
+ t.Errorf("failed for %t %s %t; res=%t want=%t", arg1, opName, arg2, got, wantRes)
+ }
+}
+
+// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting.
+func TestShortCircuit(t *testing.T) {
+ testAnd(t, false, false, false)
+ testAnd(t, false, true, false)
+ testAnd(t, true, false, false)
+ testAnd(t, true, true, true)
+
+ testOr(t, false, false, false)
+ testOr(t, false, true, true)
+ testOr(t, true, false, true)
+ testOr(t, true, true, true)
+}
diff --git a/src/cmd/compile/internal/gc/testdata/slice_test.go b/src/cmd/compile/internal/gc/testdata/slice_test.go
new file mode 100644
index 0000000..c134578
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/slice_test.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that t.s = t.s[0:x] doesn't write
+// either the slice pointer or the capacity.
+// See issue #14855.
+
+package main
+
+import "testing"
+
+const N = 1000000
+
+type X struct {
+ s []int
+}
+
+func TestSlice(t *testing.T) {
+ done := make(chan struct{})
+ a := make([]int, N+10)
+
+ x := &X{a}
+
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[1:9]
+ }
+ done <- struct{}{}
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[0:8] // should only write len
+ }
+ done <- struct{}{}
+ }()
+ <-done
+ <-done
+
+ if cap(x.s) != cap(a)-N {
+ t.Errorf("wanted cap=%d, got %d\n", cap(a)-N, cap(x.s))
+ }
+ if &x.s[0] != &a[N] {
+ t.Errorf("wanted ptr=%p, got %p\n", &a[N], &x.s[0])
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/sqrtConst_test.go b/src/cmd/compile/internal/gc/testdata/sqrtConst_test.go
new file mode 100644
index 0000000..5b7a149
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/sqrtConst_test.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "math"
+ "testing"
+)
+
+var tests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+ want float64
+}{
+ {"sqrt0", 0, math.Sqrt(0), 0},
+ {"sqrt1", 1, math.Sqrt(1), 1},
+ {"sqrt2", 2, math.Sqrt(2), math.Sqrt2},
+ {"sqrt4", 4, math.Sqrt(4), 2},
+ {"sqrt100", 100, math.Sqrt(100), 10},
+ {"sqrt101", 101, math.Sqrt(101), 10.04987562112089},
+}
+
+var nanTests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+}{
+ {"sqrtNaN", math.NaN(), math.Sqrt(math.NaN())},
+ {"sqrtNegative", -1, math.Sqrt(-1)},
+ {"sqrtNegInf", math.Inf(-1), math.Sqrt(math.Inf(-1))},
+}
+
+func TestSqrtConst(t *testing.T) {
+ for _, test := range tests {
+ if test.got != test.want {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want %f\n", test.name, test.in, test.got, test.want)
+ }
+ }
+ for _, test := range nanTests {
+ if !math.IsNaN(test.got) {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want NaN\n", test.name, test.in, test.got)
+ }
+ }
+ if got := math.Sqrt(math.Inf(1)); !math.IsInf(got, 1) {
+ t.Errorf("math.Sqrt(+Inf), got %f, want +Inf\n", got)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/testdata/string_test.go b/src/cmd/compile/internal/gc/testdata/string_test.go
new file mode 100644
index 0000000..5d086f0
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/string_test.go
@@ -0,0 +1,207 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// string_test.go tests string operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testStringSlice1_ssa(a string, i, j int) string {
+ return a[i:]
+}
+
+//go:noinline
+func testStringSlice2_ssa(a string, i, j int) string {
+ return a[:j]
+}
+
+//go:noinline
+func testStringSlice12_ssa(a string, i, j int) string {
+ return a[i:j]
+}
+
+func testStringSlice(t *testing.T) {
+ tests := [...]struct {
+ fn func(string, int, int) string
+ s string
+ low, high int
+ want string
+ }{
+ // -1 means the value is not used.
+ {testStringSlice1_ssa, "foobar", 0, -1, "foobar"},
+ {testStringSlice1_ssa, "foobar", 3, -1, "bar"},
+ {testStringSlice1_ssa, "foobar", 6, -1, ""},
+ {testStringSlice2_ssa, "foobar", -1, 0, ""},
+ {testStringSlice2_ssa, "foobar", -1, 3, "foo"},
+ {testStringSlice2_ssa, "foobar", -1, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 0, ""},
+ {testStringSlice12_ssa, "foobar", 6, 6, ""},
+ {testStringSlice12_ssa, "foobar", 1, 5, "ooba"},
+ {testStringSlice12_ssa, "foobar", 3, 3, ""},
+ {testStringSlice12_ssa, "", 0, 0, ""},
+ }
+
+ for i, test := range tests {
+ if got := test.fn(test.s, test.low, test.high); test.want != got {
+ t.Errorf("#%d %s[%d,%d] = %s, want %s", i, test.s, test.low, test.high, got, test.want)
+ }
+ }
+}
+
+type prefix struct {
+ prefix string
+}
+
+func (p *prefix) slice_ssa() {
+ p.prefix = p.prefix[:3]
+}
+
+//go:noinline
+func testStructSlice(t *testing.T) {
+ p := &prefix{"prefix"}
+ p.slice_ssa()
+ if "pre" != p.prefix {
+ t.Errorf("wrong field slice: wanted %s got %s", "pre", p.prefix)
+ }
+}
+
+func testStringSlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testStringSlice12_ssa(str, 3, 9))
+}
+
+const _Accuracy_name = "BelowExactAbove"
+
+var _Accuracy_index = [...]uint8{0, 5, 10, 15}
+
+//go:noinline
+func testSmallIndexType_ssa(i int) string {
+ return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
+}
+
+func testSmallIndexType(t *testing.T) {
+ tests := []struct {
+ i int
+ want string
+ }{
+ {0, "Below"},
+ {1, "Exact"},
+ {2, "Above"},
+ }
+
+ for i, test := range tests {
+ if got := testSmallIndexType_ssa(test.i); got != test.want {
+ t.Errorf("#%d got %s wanted %s", i, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testInt64Index_ssa(s string, i int64) byte {
+ return s[i]
+}
+
+//go:noinline
+func testInt64Slice_ssa(s string, i, j int64) string {
+ return s[i:j]
+}
+
+func testInt64Index(t *testing.T) {
+ tests := []struct {
+ i int64
+ j int64
+ b byte
+ s string
+ }{
+ {0, 5, 'B', "Below"},
+ {5, 10, 'E', "Exact"},
+ {10, 15, 'A', "Above"},
+ }
+
+ str := "BelowExactAbove"
+ for i, test := range tests {
+ if got := testInt64Index_ssa(str, test.i); got != test.b {
+ t.Errorf("#%d got %d wanted %d", i, got, test.b)
+ }
+ if got := testInt64Slice_ssa(str, test.i, test.j); got != test.s {
+ t.Errorf("#%d got %s wanted %s", i, got, test.s)
+ }
+ }
+}
+
+func testInt64IndexPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %d and expected to panic, but didn't", testInt64Index_ssa(str, 1<<32+1))
+}
+
+func testInt64SlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testInt64Slice_ssa(str, 1<<32, 1<<32+1))
+}
+
+//go:noinline
+func testStringElem_ssa(s string, i int) byte {
+ return s[i]
+}
+
+func testStringElem(t *testing.T) {
+ tests := []struct {
+ s string
+ i int
+ n byte
+ }{
+ {"foobar", 3, 98},
+ {"foobar", 0, 102},
+ {"foobar", 5, 114},
+ }
+ for _, test := range tests {
+ if got := testStringElem_ssa(test.s, test.i); got != test.n {
+ t.Errorf("testStringElem \"%s\"[%d] = %d, wanted %d", test.s, test.i, got, test.n)
+ }
+ }
+}
+
+//go:noinline
+func testStringElemConst_ssa(i int) byte {
+ s := "foobar"
+ return s[i]
+}
+
+func testStringElemConst(t *testing.T) {
+ if got := testStringElemConst_ssa(3); got != 98 {
+ t.Errorf("testStringElemConst= %d, wanted 98", got)
+ }
+}
+
+func TestString(t *testing.T) {
+ testStringSlice(t)
+ testStringSlicePanic(t)
+ testStructSlice(t)
+ testSmallIndexType(t)
+ testStringElem(t)
+ testStringElemConst(t)
+ testInt64Index(t)
+ testInt64IndexPanic(t)
+ testInt64SlicePanic(t)
+}
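
For reference, the string semantics these tests exercise — byte indexing, substring sharing, and run-time bounds panics — look like this in a standalone sketch (illustrative only, not part of this change):

package main

import "fmt"

func main() {
	s := "foobar"
	fmt.Println(s[1:5])                  // "ooba": a substring shares the original bytes, no copy is made
	fmt.Println(s[3])                    // 98: indexing yields a byte ('b'), not a rune
	fmt.Println("BelowExactAbove"[5:10]) // "Exact": the pattern used by testSmallIndexType

	defer func() { fmt.Println("recovered:", recover()) }()
	_ = s[3:9] // high bound beyond len(s): panics at run time, as the *Panic tests expect
}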
diff --git a/src/cmd/compile/internal/gc/testdata/unsafe_test.go b/src/cmd/compile/internal/gc/testdata/unsafe_test.go
new file mode 100644
index 0000000..37599d3
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/unsafe_test.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+// global pointer slot
+var a *[8]uint
+
+// unfoldable true
+var always = true
+
+// Test to make sure that a pointer value which is alive
+// across a call is retained, even when there are matching
+// conversions to/from uintptr around the call.
+// We arrange things very carefully to have to/from
+// conversions on either side of the call which cannot be
+// combined with any other conversions.
+func f_ssa() *[8]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[8]uint)(unsafe.Pointer(z))
+}
+
+// g_ssa is the same as f_ssa, but with a bit of pointer
+// arithmetic for added insanity.
+func g_ssa() *[7]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Offset x by one int.
+ x += unsafe.Sizeof(int(0))
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[7]uint)(unsafe.Pointer(z))
+}
+
+func testf(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := f_ssa()
+ for i := 0; i < 8; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func testg(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := g_ssa()
+ for i := 0; i < 7; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func alias_ssa(ui64 *uint64, ui32 *uint32) uint32 {
+ *ui32 = 0xffffffff
+ *ui64 = 0 // store
+ ret := *ui32 // load from same address, should be zero
+ *ui64 = 0xffffffffffffffff // store
+ return ret
+}
+func testdse(t *testing.T) {
+ x := int64(-1)
+ // construct two pointers that alias one another
+ ui64 := (*uint64)(unsafe.Pointer(&x))
+ ui32 := (*uint32)(unsafe.Pointer(&x))
+ if want, got := uint32(0), alias_ssa(ui64, ui32); got != want {
+ t.Fatalf("alias_ssa: wanted %d, got %d\n", want, got)
+ }
+}
+
+func TestUnsafe(t *testing.T) {
+ testf(t)
+ testg(t)
+ testdse(t)
+}
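
f_ssa and g_ssa contort their control flow so that the uintptr round-trip cannot be folded away by the compiler. In ordinary code, keeping an object alive while only an integer address is in hand is usually done with runtime.KeepAlive instead; a hedged sketch of that idiom (illustrative only, not part of this change; inspect is a hypothetical name, and go vet's unsafeptr check may still flag the conversion):

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// inspect reads the first element of buf through a raw address.
func inspect(buf *[8]uint) uint {
	addr := uintptr(unsafe.Pointer(buf)) // an integer, not a reference the GC tracks
	runtime.GC()
	v := *(*uint)(unsafe.Pointer(addr)) // ok only because buf is still reachable below
	runtime.KeepAlive(buf)              // buf is considered live at least until this call
	return v
}

func main() {
	b := new([8]uint)
	b[0] = 0xabcd
	fmt.Println(inspect(b) == 0xabcd) // true
}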
diff --git a/src/cmd/compile/internal/gc/testdata/zero_test.go b/src/cmd/compile/internal/gc/testdata/zero_test.go
new file mode 100644
index 0000000..64fa25e
--- /dev/null
+++ b/src/cmd/compile/internal/gc/testdata/zero_test.go
@@ -0,0 +1,711 @@
+// Code generated by gen/zeroGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type Z1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1_ssa(x *[1]byte) {
+ *x = [1]byte{}
+}
+func testZero1(t *testing.T) {
+ a := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1_ssa(&a.mid)
+ want := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero2_ssa(x *[2]byte) {
+ *x = [2]byte{}
+}
+func testZero2(t *testing.T) {
+ a := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero2_ssa(&a.mid)
+ want := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero2 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero3_ssa(x *[3]byte) {
+ *x = [3]byte{}
+}
+func testZero3(t *testing.T) {
+ a := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero3_ssa(&a.mid)
+ want := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero3 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero4_ssa(x *[4]byte) {
+ *x = [4]byte{}
+}
+func testZero4(t *testing.T) {
+ a := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero4_ssa(&a.mid)
+ want := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero4 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero5_ssa(x *[5]byte) {
+ *x = [5]byte{}
+}
+func testZero5(t *testing.T) {
+ a := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero5_ssa(&a.mid)
+ want := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero5 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero6_ssa(x *[6]byte) {
+ *x = [6]byte{}
+}
+func testZero6(t *testing.T) {
+ a := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero6_ssa(&a.mid)
+ want := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero6 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero7_ssa(x *[7]byte) {
+ *x = [7]byte{}
+}
+func testZero7(t *testing.T) {
+ a := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero7_ssa(&a.mid)
+ want := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero7 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero8_ssa(x *[8]byte) {
+ *x = [8]byte{}
+}
+func testZero8(t *testing.T) {
+ a := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8_ssa(&a.mid)
+ want := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero8 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero9_ssa(x *[9]byte) {
+ *x = [9]byte{}
+}
+func testZero9(t *testing.T) {
+ a := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero9_ssa(&a.mid)
+ want := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero9 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero10_ssa(x *[10]byte) {
+ *x = [10]byte{}
+}
+func testZero10(t *testing.T) {
+ a := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero10_ssa(&a.mid)
+ want := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero10 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero15_ssa(x *[15]byte) {
+ *x = [15]byte{}
+}
+func testZero15(t *testing.T) {
+ a := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero15_ssa(&a.mid)
+ want := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero15 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero16_ssa(x *[16]byte) {
+ *x = [16]byte{}
+}
+func testZero16(t *testing.T) {
+ a := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16_ssa(&a.mid)
+ want := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero16 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero17_ssa(x *[17]byte) {
+ *x = [17]byte{}
+}
+func testZero17(t *testing.T) {
+ a := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero17_ssa(&a.mid)
+ want := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero17 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero23_ssa(x *[23]byte) {
+ *x = [23]byte{}
+}
+func testZero23(t *testing.T) {
+ a := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero23_ssa(&a.mid)
+ want := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero23 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero24_ssa(x *[24]byte) {
+ *x = [24]byte{}
+}
+func testZero24(t *testing.T) {
+ a := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24_ssa(&a.mid)
+ want := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero24 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero25_ssa(x *[25]byte) {
+ *x = [25]byte{}
+}
+func testZero25(t *testing.T) {
+ a := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero25_ssa(&a.mid)
+ want := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero25 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero31_ssa(x *[31]byte) {
+ *x = [31]byte{}
+}
+func testZero31(t *testing.T) {
+ a := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero31_ssa(&a.mid)
+ want := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero31 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero32_ssa(x *[32]byte) {
+ *x = [32]byte{}
+}
+func testZero32(t *testing.T) {
+ a := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32_ssa(&a.mid)
+ want := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero32 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero33_ssa(x *[33]byte) {
+ *x = [33]byte{}
+}
+func testZero33(t *testing.T) {
+ a := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero33_ssa(&a.mid)
+ want := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero33 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero63_ssa(x *[63]byte) {
+ *x = [63]byte{}
+}
+func testZero63(t *testing.T) {
+ a := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero63_ssa(&a.mid)
+ want := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero63 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero64_ssa(x *[64]byte) {
+ *x = [64]byte{}
+}
+func testZero64(t *testing.T) {
+ a := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64_ssa(&a.mid)
+ want := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero64 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero65_ssa(x *[65]byte) {
+ *x = [65]byte{}
+}
+func testZero65(t *testing.T) {
+ a := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero65_ssa(&a.mid)
+ want := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero65 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1023_ssa(x *[1023]byte) {
+ *x = [1023]byte{}
+}
+func testZero1023(t *testing.T) {
+ a := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1023_ssa(&a.mid)
+ want := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1023 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1024_ssa(x *[1024]byte) {
+ *x = [1024]byte{}
+}
+func testZero1024(t *testing.T) {
+ a := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1024_ssa(&a.mid)
+ want := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1024 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1025_ssa(x *[1025]byte) {
+ *x = [1025]byte{}
+}
+func testZero1025(t *testing.T) {
+ a := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1025_ssa(&a.mid)
+ want := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1025 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8u1 struct {
+ b bool
+ val [8]byte
+}
+type Z8u2 struct {
+ i uint16
+ val [8]byte
+}
+
+//go:noinline
+func zero8u1_ssa(t *Z8u1) {
+ t.val = [8]byte{}
+}
+
+//go:noinline
+func zero8u2_ssa(t *Z8u2) {
+ t.val = [8]byte{}
+}
+func testZero8u(t *testing.T) {
+ a := Z8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u1_ssa(&a)
+ want := Z8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero8u2 got=%v, want %v\n", a, want)
+ }
+ b := Z8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u2_ssa(&b)
+ wantb := Z8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero8u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z16u1 struct {
+ b bool
+ val [16]byte
+}
+type Z16u2 struct {
+ i uint16
+ val [16]byte
+}
+
+//go:noinline
+func zero16u1_ssa(t *Z16u1) {
+ t.val = [16]byte{}
+}
+
+//go:noinline
+func zero16u2_ssa(t *Z16u2) {
+ t.val = [16]byte{}
+}
+func testZero16u(t *testing.T) {
+ a := Z16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u1_ssa(&a)
+ want := Z16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero16u2 got=%v, want %v\n", a, want)
+ }
+ b := Z16u2{15, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u2_ssa(&b)
+ wantb := Z16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero16u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z24u1 struct {
+ b bool
+ val [24]byte
+}
+type Z24u2 struct {
+ i uint16
+ val [24]byte
+}
+
+//go:noinline
+func zero24u1_ssa(t *Z24u1) {
+ t.val = [24]byte{}
+}
+
+//go:noinline
+func zero24u2_ssa(t *Z24u2) {
+ t.val = [24]byte{}
+}
+func testZero24u(t *testing.T) {
+ a := Z24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u1_ssa(&a)
+ want := Z24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero24u2 got=%v, want %v\n", a, want)
+ }
+ b := Z24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u2_ssa(&b)
+ wantb := Z24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero24u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z32u1 struct {
+ b bool
+ val [32]byte
+}
+type Z32u2 struct {
+ i uint16
+ val [32]byte
+}
+
+//go:noinline
+func zero32u1_ssa(t *Z32u1) {
+ t.val = [32]byte{}
+}
+
+//go:noinline
+func zero32u2_ssa(t *Z32u2) {
+ t.val = [32]byte{}
+}
+func testZero32u(t *testing.T) {
+ a := Z32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u1_ssa(&a)
+ want := Z32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero32u2 got=%v, want %v\n", a, want)
+ }
+ b := Z32u2{15, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u2_ssa(&b)
+ wantb := Z32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero32u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z64u1 struct {
+ b bool
+ val [64]byte
+}
+type Z64u2 struct {
+ i uint16
+ val [64]byte
+}
+
+//go:noinline
+func zero64u1_ssa(t *Z64u1) {
+ t.val = [64]byte{}
+}
+
+//go:noinline
+func zero64u2_ssa(t *Z64u2) {
+ t.val = [64]byte{}
+}
+func testZero64u(t *testing.T) {
+ a := Z64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u1_ssa(&a)
+ want := Z64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero64u2 got=%v, want %v\n", a, want)
+ }
+ b := Z64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u2_ssa(&b)
+ wantb := Z64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero64u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z256u1 struct {
+ b bool
+ val [256]byte
+}
+type Z256u2 struct {
+ i uint16
+ val [256]byte
+}
+
+//go:noinline
+func zero256u1_ssa(t *Z256u1) {
+ t.val = [256]byte{}
+}
+
+//go:noinline
+func zero256u2_ssa(t *Z256u2) {
+ t.val = [256]byte{}
+}
+func testZero256u(t *testing.T) {
+ a := Z256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u1_ssa(&a)
+ want := Z256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero256u2 got=%v, want %v\n", a, want)
+ }
+ b := Z256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u2_ssa(&b)
+ wantb := Z256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero256u2 got=%v, want %v\n", b, wantb)
+ }
+}
+func TestZero(t *testing.T) {
+ testZero1(t)
+ testZero2(t)
+ testZero3(t)
+ testZero4(t)
+ testZero5(t)
+ testZero6(t)
+ testZero7(t)
+ testZero8(t)
+ testZero9(t)
+ testZero10(t)
+ testZero15(t)
+ testZero16(t)
+ testZero17(t)
+ testZero23(t)
+ testZero24(t)
+ testZero25(t)
+ testZero31(t)
+ testZero32(t)
+ testZero33(t)
+ testZero63(t)
+ testZero64(t)
+ testZero65(t)
+ testZero1023(t)
+ testZero1024(t)
+ testZero1025(t)
+ testZero8u(t)
+ testZero16u(t)
+ testZero24u(t)
+ testZero32u(t)
+ testZero64u(t)
+ testZero256u(t)
+}
diff --git a/src/cmd/compile/internal/gc/timings.go b/src/cmd/compile/internal/gc/timings.go
new file mode 100644
index 0000000..56b3899
--- /dev/null
+++ b/src/cmd/compile/internal/gc/timings.go
@@ -0,0 +1,235 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+// Timings collects the execution times of labeled phases
+// which are added through a sequence of Start/Stop calls.
+// Events may be associated with each phase via AddEvent.
+type Timings struct {
+ list []timestamp
+ events map[int][]*event // lazily allocated
+}
+
+type timestamp struct {
+ time time.Time
+ label string
+ start bool
+}
+
+type event struct {
+ size int64 // count or amount of data processed (allocations, data size, lines, funcs, ...)
+ unit string // unit of size measure (count, MB, lines, funcs, ...)
+}
+
+func (t *Timings) append(labels []string, start bool) {
+ t.list = append(t.list, timestamp{time.Now(), strings.Join(labels, ":"), start})
+}
+
+// Start marks the beginning of a new phase and implicitly stops the previous phase.
+// The phase name is the colon-separated concatenation of the labels.
+func (t *Timings) Start(labels ...string) {
+ t.append(labels, true)
+}
+
+// Stop marks the end of a phase and implicitly starts a new phase.
+// The labels are added to the labels of the ended phase.
+func (t *Timings) Stop(labels ...string) {
+ t.append(labels, false)
+}
+
+// AddEvent associates an event, i.e., a count, or an amount of data,
+// with the most recently started or stopped phase; or the very first
+// phase if Start or Stop hasn't been called yet. The unit specifies
+// the unit of measurement (e.g., MB, lines, no. of funcs, etc.).
+func (t *Timings) AddEvent(size int64, unit string) {
+ m := t.events
+ if m == nil {
+ m = make(map[int][]*event)
+ t.events = m
+ }
+ i := len(t.list)
+ if i > 0 {
+ i--
+ }
+ m[i] = append(m[i], &event{size, unit})
+}
+
+// Write prints the phase times to w.
+// The prefix is printed at the start of each line.
+func (t *Timings) Write(w io.Writer, prefix string) {
+ if len(t.list) > 0 {
+ var lines lines
+
+ // group of phases with shared non-empty label prefix
+ var group struct {
+ label string // label prefix
+ tot time.Duration // accumulated phase time
+ size int // number of phases collected in group
+ }
+
+ // accumulated time between Stop/Start timestamps
+ var unaccounted time.Duration
+
+ // process Start/Stop timestamps
+ pt := &t.list[0] // previous timestamp
+ tot := t.list[len(t.list)-1].time.Sub(pt.time)
+ for i := 1; i < len(t.list); i++ {
+ qt := &t.list[i] // current timestamp
+ dt := qt.time.Sub(pt.time)
+
+ var label string
+ var events []*event
+ if pt.start {
+ // previous phase started
+ label = pt.label
+ events = t.events[i-1]
+ if qt.start {
+ // start implicitly ended previous phase; nothing to do
+ } else {
+ // stop ended previous phase; append stop labels, if any
+ if qt.label != "" {
+ label += ":" + qt.label
+ }
+ // events associated with stop replace prior events
+ if e := t.events[i]; e != nil {
+ events = e
+ }
+ }
+ } else {
+ // previous phase stopped
+ if qt.start {
+ // between a stopped and started phase; unaccounted time
+ unaccounted += dt
+ } else {
+ // previous stop implicitly started current phase
+ label = qt.label
+ events = t.events[i]
+ }
+ }
+ if label != "" {
+ // add phase to existing group, or start a new group
+ l := commonPrefix(group.label, label)
+ if group.size == 1 && l != "" || group.size > 1 && l == group.label {
+ // add to existing group
+ group.label = l
+ group.tot += dt
+ group.size++
+ } else {
+ // start a new group
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+ group.label = label
+ group.tot = dt
+ group.size = 1
+ }
+
+ // write phase
+ lines.add(prefix+label, 1, dt, tot, events)
+ }
+
+ pt = qt
+ }
+
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+
+ if unaccounted != 0 {
+ lines.add(prefix+"unaccounted", 1, unaccounted, tot, nil)
+ }
+
+ lines.add(prefix+"total", 1, tot, tot, nil)
+
+ lines.write(w)
+ }
+}
+
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return a[:i]
+}
+
+type lines [][]string
+
+func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) {
+ var line []string
+ add := func(format string, args ...interface{}) {
+ line = append(line, fmt.Sprintf(format, args...))
+ }
+
+ add("%s", label)
+ add(" %d", n)
+ add(" %d ns/op", dt)
+ add(" %.2f %%", float64(dt)/float64(tot)*100)
+
+ for _, e := range events {
+ add(" %d", e.size)
+ add(" %s", e.unit)
+ add(" %d", int64(float64(e.size)/dt.Seconds()+0.5))
+ add(" %s/s", e.unit)
+ }
+
+ *lines = append(*lines, line)
+}
+
+func (lines lines) write(w io.Writer) {
+ // determine column widths and contents
+ var widths []int
+ var number []bool
+ for _, line := range lines {
+ for i, col := range line {
+ if i < len(widths) {
+ if len(col) > widths[i] {
+ widths[i] = len(col)
+ }
+ } else {
+ widths = append(widths, len(col))
+ number = append(number, isnumber(col)) // first line determines column contents
+ }
+ }
+ }
+
+ // make column widths a multiple of align for more stable output
+ const align = 1 // set to a value > 1 to enable
+ if align > 1 {
+ for i, w := range widths {
+ w += align - 1
+ widths[i] = w - w%align
+ }
+ }
+
+ // print lines taking column widths and contents into account
+ for _, line := range lines {
+ for i, col := range line {
+ format := "%-*s"
+ if number[i] {
+ format = "%*s" // numbers are right-aligned
+ }
+ fmt.Fprintf(w, format, widths[i], col)
+ }
+ fmt.Fprintln(w)
+ }
+}
+
+func isnumber(s string) bool {
+ for _, ch := range s {
+ if ch <= ' ' {
+ continue // ignore leading whitespace
+ }
+ return '0' <= ch && ch <= '9' || ch == '.' || ch == '-' || ch == '+'
+ }
+ return false
+}
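A minimal usage sketch of the Timings API added above. The labels, the event size, and the helper name timingsExample are illustrative only; the snippet assumes it sits inside package gc so that Timings is in scope.

	package gc

	import "io"

	// timingsExample shows the Start/Stop/AddEvent/Write protocol.
	func timingsExample(w io.Writer) {
		var t Timings               // the zero value is ready to use
		t.Start("fe", "parse")      // begins phase "fe:parse"
		// ... parsing work ...
		t.AddEvent(120000, "lines") // attach a count to the current phase
		t.Start("fe", "typecheck")  // implicitly stops "fe:parse"
		// ... typechecking work ...
		t.Stop("total")             // stop; the label is appended to the ended phase
		t.Write(w, "time: ")        // one line per phase, plus a total (and subtotals for grouped phases)
	}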
diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go
new file mode 100644
index 0000000..ed4b5a2
--- /dev/null
+++ b/src/cmd/compile/internal/gc/trace.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package gc
+
+import (
+ "os"
+ tracepkg "runtime/trace"
+)
+
+func init() {
+ traceHandler = traceHandlerGo17
+}
+
+func traceHandlerGo17(traceprofile string) {
+ f, err := os.Create(traceprofile)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ if err := tracepkg.Start(f); err != nil {
+ Fatalf("%v", err)
+ }
+ atExit(tracepkg.Stop)
+}
diff --git a/src/cmd/compile/internal/gc/truncconst_test.go b/src/cmd/compile/internal/gc/truncconst_test.go
new file mode 100644
index 0000000..d153818
--- /dev/null
+++ b/src/cmd/compile/internal/gc/truncconst_test.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "testing"
+
+var f52want float64 = 1.0 / (1 << 52)
+var f53want float64 = 1.0 / (1 << 53)
+
+func TestTruncFlt(t *testing.T) {
+ const f52 = 1 + 1.0/(1<<52)
+ const f53 = 1 + 1.0/(1<<53)
+
+ if got := f52 - 1; got != f52want {
+ t.Errorf("f52-1 = %g, want %g", got, f52want)
+ }
+ if got := float64(f52) - 1; got != f52want {
+ t.Errorf("float64(f52)-1 = %g, want %g", got, f52want)
+ }
+ if got := f53 - 1; got != f53want {
+ t.Errorf("f53-1 = %g, want %g", got, f53want)
+ }
+ if got := float64(f53) - 1; got != 0 {
+ t.Errorf("float64(f53)-1 = %g, want 0", got)
+ }
+}
+
+func TestTruncCmplx(t *testing.T) {
+ const r52 = complex(1+1.0/(1<<52), 0)
+ const r53 = complex(1+1.0/(1<<53), 0)
+
+ if got := real(r52 - 1); got != f52want {
+ t.Errorf("real(r52-1) = %g, want %g", got, f52want)
+ }
+ if got := real(complex128(r52) - 1); got != f52want {
+ t.Errorf("real(complex128(r52)-1) = %g, want %g", got, f52want)
+ }
+ if got := real(r53 - 1); got != f53want {
+ t.Errorf("real(r53-1) = %g, want %g", got, f53want)
+ }
+ if got := real(complex128(r53) - 1); got != 0 {
+ t.Errorf("real(complex128(r53)-1) = %g, want 0", got)
+ }
+
+ const i52 = complex(0, 1+1.0/(1<<52))
+ const i53 = complex(0, 1+1.0/(1<<53))
+
+ if got := imag(i52 - 1i); got != f52want {
+ t.Errorf("imag(i52-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(complex128(i52) - 1i); got != f52want {
+ t.Errorf("imag(complex128(i52)-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(i53 - 1i); got != f53want {
+ t.Errorf("imag(i53-1i) = %g, want %g", got, f53want)
+ }
+ if got := imag(complex128(i53) - 1i); got != 0 {
+ t.Errorf("imag(complex128(i53)-1i) = %g, want 0", got)
+ }
+
+}
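The behavior pinned down by these tests: untyped constant arithmetic keeps enough precision that 1 + 2^-53 stays distinct from 1, but converting the constant to float64 rounds to 52 fraction bits, so 1 + 2^-53 rounds (ties to even) back to exactly 1 while 1 + 2^-52 does not. A small standalone program showing the same effect:

	package main

	import "fmt"

	func main() {
		const f52 = 1 + 1.0/(1<<52) // exactly representable as a float64
		const f53 = 1 + 1.0/(1<<53) // halfway between 1 and the next float64

		fmt.Println(f52 - 1)          // 2.220446049250313e-16, i.e. 2^-52
		fmt.Println(f53 - 1)          // 1.1102230246251565e-16: constant math keeps full precision
		fmt.Println(float64(f53) - 1) // 0: the conversion rounds 1+2^-53 down to 1
	}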
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
new file mode 100644
index 0000000..c0b0503
--- /dev/null
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -0,0 +1,4019 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strings"
+)
+
+// To enable tracing support (-t flag), set enableTrace to true.
+const enableTrace = false
+
+var trace bool
+var traceIndent []byte
+var skipDowidthForTracing bool
+
+func tracePrint(title string, n *Node) func(np **Node) {
+ indent := traceIndent
+
+ // guard against nil
+ var pos, op string
+ var tc uint8
+ if n != nil {
+ pos = linestr(n.Pos)
+ op = n.Op.String()
+ tc = n.Typecheck()
+ }
+
+ skipDowidthForTracing = true
+ defer func() { skipDowidthForTracing = false }()
+ fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
+ traceIndent = append(traceIndent, ". "...)
+
+ return func(np **Node) {
+ traceIndent = traceIndent[:len(traceIndent)-2]
+
+ // if we have a result, use that
+ if np != nil {
+ n = *np
+ }
+
+ // guard against nil
+ // use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
+ var tc uint8
+ var typ *types.Type
+ if n != nil {
+ pos = linestr(n.Pos)
+ op = n.Op.String()
+ tc = n.Typecheck()
+ typ = n.Type
+ }
+
+ skipDowidthForTracing = true
+ defer func() { skipDowidthForTracing = false }()
+ fmt.Printf("%s: %s=> %p %s %v tc=%d type=%#L\n", pos, indent, n, op, n, tc, typ)
+ }
+}
+
+const (
+ ctxStmt = 1 << iota // evaluated at statement level
+ ctxExpr // evaluated in value context
+ ctxType // evaluated in type context
+ ctxCallee // call-only expressions are ok
+ ctxMultiOK // multivalue function returns are ok
+ ctxAssign // assigning to expression
+)
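These flags form a bitmask: callers pass the union of contexts they are willing to accept as top, and typecheck1 accumulates in ok which contexts the node actually satisfied; the two are compared near the end of typecheck1. A small illustration of the convention, mirroring calls that appear later in this file:

	// Accept either an expression or a type at this position.
	n.Left = typecheck(n.Left, ctxExpr|ctxType)

	// Did the caller ask for a type and nothing else?
	if top&(ctxExpr|ctxType) == ctxType {
		yyerror("%v is not a type", n)
	}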
+
+// type checks the whole tree of an expression.
+// calculates expression types.
+// evaluates compile time constants.
+// marks variables that escape the local frame.
+// rewrites n.Op to be more specific in some cases.
+
+var typecheckdefstack []*Node
+
+// resolve ONONAME to definition, if any.
+func resolve(n *Node) (res *Node) {
+ if n == nil || n.Op != ONONAME {
+ return n
+ }
+
+ // only trace if there's work to do
+ if enableTrace && trace {
+ defer tracePrint("resolve", n)(&res)
+ }
+
+ if n.Sym.Pkg != localpkg {
+ if inimport {
+ Fatalf("recursive inimport")
+ }
+ inimport = true
+ expandDecl(n)
+ inimport = false
+ return n
+ }
+
+ r := asNode(n.Sym.Def)
+ if r == nil {
+ return n
+ }
+
+ if r.Op == OIOTA {
+ if x := getIotaValue(); x >= 0 {
+ return nodintconst(x)
+ }
+ return n
+ }
+
+ return r
+}
+
+func typecheckslice(l []*Node, top int) {
+ for i := range l {
+ l[i] = typecheck(l[i], top)
+ }
+}
+
+var _typekind = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TBOOL: "bool",
+ TSTRING: "string",
+ TPTR: "pointer",
+ TUNSAFEPTR: "unsafe.Pointer",
+ TSTRUCT: "struct",
+ TINTER: "interface",
+ TCHAN: "chan",
+ TMAP: "map",
+ TARRAY: "array",
+ TSLICE: "slice",
+ TFUNC: "func",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+}
+
+func typekind(t *types.Type) string {
+ if t.IsUntyped() {
+ return fmt.Sprintf("%v", t)
+ }
+ et := t.Etype
+ if int(et) < len(_typekind) {
+ s := _typekind[et]
+ if s != "" {
+ return s
+ }
+ }
+ return fmt.Sprintf("etype=%d", et)
+}
+
+func cycleFor(start *Node) []*Node {
+ // Find the start node in typecheck_tcstack.
+ // We know that it must exist because each time we mark
+ // a node with n.SetTypecheck(2) we push it on the stack,
+ // and each time we mark a node with n.SetTypecheck(1) we
+ // pop it from the stack. We hit a cycle when we encounter
+ // a node marked 2, in which case it must be on the stack.
+ i := len(typecheck_tcstack) - 1
+ for i > 0 && typecheck_tcstack[i] != start {
+ i--
+ }
+
+ // collect all nodes with same Op
+ var cycle []*Node
+ for _, n := range typecheck_tcstack[i:] {
+ if n.Op == start.Op {
+ cycle = append(cycle, n)
+ }
+ }
+
+ return cycle
+}
+
+func cycleTrace(cycle []*Node) string {
+ var s string
+ for i, n := range cycle {
+ s += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cycle[(i+1)%len(cycle)])
+ }
+ return s
+}
+
+var typecheck_tcstack []*Node
+
+// typecheck type checks node n.
+// The result of typecheck MUST be assigned back to n, e.g.
+// n.Left = typecheck(n.Left, top)
+func typecheck(n *Node, top int) (res *Node) {
+ // cannot type check until all the source has been parsed
+ if !typecheckok {
+ Fatalf("early typecheck")
+ }
+
+ if n == nil {
+ return nil
+ }
+
+ // only trace if there's work to do
+ if enableTrace && trace {
+ defer tracePrint("typecheck", n)(&res)
+ }
+
+ lno := setlineno(n)
+
+ // Skip over parens.
+ for n.Op == OPAREN {
+ n = n.Left
+ }
+
+ // Resolve definition of name and value of iota lazily.
+ n = resolve(n)
+
+ // Skip typecheck if already done.
+ // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+ if n.Typecheck() == 1 {
+ switch n.Op {
+ case ONAME, OTYPE, OLITERAL, OPACK:
+ break
+
+ default:
+ lineno = lno
+ return n
+ }
+ }
+
+ if n.Typecheck() == 2 {
+ // Typechecking loop. Try to print a meaningful message;
+ // otherwise fall back to a trace of the typechecking stack.
+ switch n.Op {
+ // We can already diagnose variables used as types.
+ case ONAME:
+ if top&(ctxExpr|ctxType) == ctxType {
+ yyerror("%v is not a type", n)
+ }
+
+ case OTYPE:
+ // Only report a type cycle if we are expecting a type.
+ // Otherwise let other code report an error.
+ if top&ctxType == ctxType {
+ // A cycle containing only alias types is an error
+ // since it would expand indefinitely when aliases
+ // are substituted.
+ cycle := cycleFor(n)
+ for _, n1 := range cycle {
+ if n1.Name != nil && !n1.Name.Param.Alias() {
+ // Cycle is ok. But if n is an alias type and doesn't
+ // have a type yet, we have a recursive type declaration
+ // with aliases that we can't handle properly yet.
+ // Report an error rather than crashing later.
+ if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
+ lineno = n.Pos
+ Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+ }
+ lineno = lno
+ return n
+ }
+ }
+ yyerrorl(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ }
+
+ case OLITERAL:
+ if top&(ctxExpr|ctxType) == ctxType {
+ yyerror("%v is not a type", n)
+ break
+ }
+ yyerrorl(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n)))
+ }
+
+ if nsavederrors+nerrors == 0 {
+ var trace string
+ for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
+ x := typecheck_tcstack[i]
+ trace += fmt.Sprintf("\n\t%v %v", x.Line(), x)
+ }
+ yyerror("typechecking loop involving %v%s", n, trace)
+ }
+
+ lineno = lno
+ return n
+ }
+
+ n.SetTypecheck(2)
+
+ typecheck_tcstack = append(typecheck_tcstack, n)
+ n = typecheck1(n, top)
+
+ n.SetTypecheck(1)
+
+ last := len(typecheck_tcstack) - 1
+ typecheck_tcstack[last] = nil
+ typecheck_tcstack = typecheck_tcstack[:last]
+
+ lineno = lno
+ return n
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is almost equivalent to defaultlit
+// but also accepts untyped numeric values representable as
+// a value of type int (see also checkmake for comparison).
+// The result of indexlit MUST be assigned back to n, e.g.
+// n.Left = indexlit(n.Left)
+func indexlit(n *Node) *Node {
+ if n != nil && n.Type != nil && n.Type.Etype == TIDEAL {
+ return defaultlit(n, types.Types[TINT])
+ }
+ return n
+}
+
+// The result of typecheck1 MUST be assigned back to n, e.g.
+// n.Left = typecheck1(n.Left, top)
+func typecheck1(n *Node, top int) (res *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheck1", n)(&res)
+ }
+
+ switch n.Op {
+ case OLITERAL, ONAME, ONONAME, OTYPE:
+ if n.Sym == nil {
+ break
+ }
+
+ if n.Op == ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
+ yyerror("use of builtin %v not in function call", n.Sym)
+ n.Type = nil
+ return n
+ }
+
+ typecheckdef(n)
+ if n.Op == ONONAME {
+ n.Type = nil
+ return n
+ }
+ }
+
+ ok := 0
+ switch n.Op {
+ // until typecheck is complete, do nothing.
+ default:
+ Dump("typecheck", n)
+
+ Fatalf("typecheck %v", n.Op)
+
+ // names
+ case OLITERAL:
+ ok |= ctxExpr
+
+ if n.Type == nil && n.Val().Ctype() == CTSTR {
+ n.Type = types.UntypedString
+ }
+
+ case ONONAME:
+ ok |= ctxExpr
+
+ case ONAME:
+ if n.Name.Decldepth == 0 {
+ n.Name.Decldepth = decldepth
+ }
+ if n.SubOp() != 0 {
+ ok |= ctxCallee
+ break
+ }
+
+ if top&ctxAssign == 0 {
+ // not a write to the variable
+ if n.isBlank() {
+ yyerror("cannot use _ as value")
+ n.Type = nil
+ return n
+ }
+
+ n.Name.SetUsed(true)
+ }
+
+ ok |= ctxExpr
+
+ case OPACK:
+ yyerror("use of package %v without selector", n.Sym)
+ n.Type = nil
+ return n
+
+ case ODDD:
+ break
+
+ // types (ODEREF is with exprs)
+ case OTYPE:
+ ok |= ctxType
+
+ if n.Type == nil {
+ return n
+ }
+
+ case OTARRAY:
+ ok |= ctxType
+ r := typecheck(n.Right, ctxType)
+ if r.Type == nil {
+ n.Type = nil
+ return n
+ }
+
+ var t *types.Type
+ if n.Left == nil {
+ t = types.NewSlice(r.Type)
+ } else if n.Left.Op == ODDD {
+ if !n.Diag() {
+ n.SetDiag(true)
+ yyerror("use of [...] array outside of array literal")
+ }
+ n.Type = nil
+ return n
+ } else {
+ n.Left = indexlit(typecheck(n.Left, ctxExpr))
+ l := n.Left
+ if consttype(l) != CTINT {
+ switch {
+ case l.Type == nil:
+ // Error already reported elsewhere.
+ case l.Type.IsInteger() && l.Op != OLITERAL:
+ yyerror("non-constant array bound %v", l)
+ default:
+ yyerror("invalid array bound %v", l)
+ }
+ n.Type = nil
+ return n
+ }
+
+ v := l.Val()
+ if doesoverflow(v, types.Types[TINT]) {
+ yyerror("array bound is too large")
+ n.Type = nil
+ return n
+ }
+
+ bound := v.U.(*Mpint).Int64()
+ if bound < 0 {
+ yyerror("array bound must be non-negative")
+ n.Type = nil
+ return n
+ }
+ t = types.NewArray(r.Type, bound)
+ }
+
+ setTypeNode(n, t)
+ n.Left = nil
+ n.Right = nil
+ checkwidth(t)
+
+ case OTMAP:
+ ok |= ctxType
+ n.Left = typecheck(n.Left, ctxType)
+ n.Right = typecheck(n.Right, ctxType)
+ l := n.Left
+ r := n.Right
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if l.Type.NotInHeap() {
+ yyerror("incomplete (or unallocatable) map key not allowed")
+ }
+ if r.Type.NotInHeap() {
+ yyerror("incomplete (or unallocatable) map value not allowed")
+ }
+
+ setTypeNode(n, types.NewMap(l.Type, r.Type))
+ mapqueue = append(mapqueue, n) // check map keys when all types are settled
+ n.Left = nil
+ n.Right = nil
+
+ case OTCHAN:
+ ok |= ctxType
+ n.Left = typecheck(n.Left, ctxType)
+ l := n.Left
+ if l.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if l.Type.NotInHeap() {
+ yyerror("chan of incomplete (or unallocatable) type not allowed")
+ }
+
+ setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
+ n.Left = nil
+ n.ResetAux()
+
+ case OTSTRUCT:
+ ok |= ctxType
+ setTypeNode(n, tostruct(n.List.Slice()))
+ n.List.Set(nil)
+
+ case OTINTER:
+ ok |= ctxType
+ setTypeNode(n, tointerface(n.List.Slice()))
+
+ case OTFUNC:
+ ok |= ctxType
+ setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice()))
+ n.Left = nil
+ n.List.Set(nil)
+ n.Rlist.Set(nil)
+
+ // type or expr
+ case ODEREF:
+ n.Left = typecheck(n.Left, ctxExpr|ctxType)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if l.Op == OTYPE {
+ ok |= ctxType
+ setTypeNode(n, types.NewPtr(l.Type))
+ n.Left = nil
+ // Ensure l.Type gets dowidth'd for the backend. Issue 20174.
+ checkwidth(l.Type)
+ break
+ }
+
+ if !t.IsPtr() {
+ if top&(ctxExpr|ctxStmt) != 0 {
+ yyerror("invalid indirect of %L", n.Left)
+ n.Type = nil
+ return n
+ }
+
+ break
+ }
+
+ ok |= ctxExpr
+ n.Type = t.Elem()
+
+ // arithmetic exprs
+ case OASOP,
+ OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ ORSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ OSUB,
+ OXOR:
+ var l *Node
+ var op Op
+ var r *Node
+ if n.Op == OASOP {
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Right = typecheck(n.Right, ctxExpr)
+ l = n.Left
+ r = n.Right
+ checkassign(n, n.Left)
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if n.Implicit() && !okforarith[l.Type.Etype] {
+ yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
+ n.Type = nil
+ return n
+ }
+ // TODO(marvin): Fix Node.EType type union.
+ op = n.SubOp()
+ } else {
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Right = typecheck(n.Right, ctxExpr)
+ l = n.Left
+ r = n.Right
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ op = n.Op
+ }
+ if op == OLSH || op == ORSH {
+ r = defaultlit(r, types.Types[TUINT])
+ n.Right = r
+ t := r.Type
+ if !t.IsInteger() {
+ yyerror("invalid operation: %v (shift count type %v, must be integer)", n, r.Type)
+ n.Type = nil
+ return n
+ }
+ if t.IsSigned() && !langSupported(1, 13, curpkg()) {
+ yyerrorv("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type)
+ n.Type = nil
+ return n
+ }
+ t = l.Type
+ if t != nil && t.Etype != TIDEAL && !t.IsInteger() {
+ yyerror("invalid operation: %v (shift of type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.Type = l.Type
+ if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL {
+ n.Type = types.UntypedInt
+ }
+
+ break
+ }
+
+ // For "x == x && len(s)", it's better to report that "len(s)" (type int)
+ // can't be used with "&&" than to report that "x == x" (type untyped bool)
+ // can't be converted to int (see issue #41500).
+ if n.Op == OANDAND || n.Op == OOROR {
+ if !n.Left.Type.IsBoolean() {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
+ n.Type = nil
+ return n
+ }
+ if !n.Right.Type.IsBoolean() {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
+ n.Type = nil
+ return n
+ }
+ }
+
+ // ideal mixed with non-ideal
+ l, r = defaultlit2(l, r, false)
+
+ n.Left = l
+ n.Right = r
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ t := l.Type
+ if t.Etype == TIDEAL {
+ t = r.Type
+ }
+ et := t.Etype
+ if et == TIDEAL {
+ et = TINT
+ }
+ aop := OXXX
+ if iscmp[n.Op] && t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
+ converted := false
+ if r.Type.Etype != TBLANK {
+ aop, _ = assignop(l.Type, r.Type)
+ if aop != OXXX {
+ if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
+ n.Type = nil
+ return n
+ }
+
+ dowidth(l.Type)
+ if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
+ l = nod(aop, l, nil)
+ l.Type = r.Type
+ l.SetTypecheck(1)
+ n.Left = l
+ }
+
+ t = r.Type
+ converted = true
+ }
+ }
+
+ if !converted && l.Type.Etype != TBLANK {
+ aop, _ = assignop(r.Type, l.Type)
+ if aop != OXXX {
+ if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
+ n.Type = nil
+ return n
+ }
+
+ dowidth(r.Type)
+ if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
+ r = nod(aop, r, nil)
+ r.Type = l.Type
+ r.SetTypecheck(1)
+ n.Right = r
+ }
+
+ t = l.Type
+ }
+ }
+
+ et = t.Etype
+ }
+
+ if t.Etype != TIDEAL && !types.Identical(l.Type, r.Type) {
+ l, r = defaultlit2(l, r, true)
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 {
+ yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
+ n.Type = nil
+ return n
+ }
+ }
+
+ if t.Etype == TIDEAL {
+ t = mixUntyped(l.Type, r.Type)
+ }
+ if dt := defaultType(t); !okfor[op][dt.Etype] {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ n.Type = nil
+ return n
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if l.Type.IsArray() && !IsComparable(l.Type) {
+ yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
+ n.Type = nil
+ return n
+ }
+
+ if l.Type.IsSlice() && !l.isNil() && !r.isNil() {
+ yyerror("invalid operation: %v (slice can only be compared to nil)", n)
+ n.Type = nil
+ return n
+ }
+
+ if l.Type.IsMap() && !l.isNil() && !r.isNil() {
+ yyerror("invalid operation: %v (map can only be compared to nil)", n)
+ n.Type = nil
+ return n
+ }
+
+ if l.Type.Etype == TFUNC && !l.isNil() && !r.isNil() {
+ yyerror("invalid operation: %v (func can only be compared to nil)", n)
+ n.Type = nil
+ return n
+ }
+
+ if l.Type.IsStruct() {
+ if f := IncomparableField(l.Type); f != nil {
+ yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ n.Type = nil
+ return n
+ }
+ }
+
+ if iscmp[n.Op] {
+ evconst(n)
+ t = types.UntypedBool
+ if n.Op != OLITERAL {
+ l, r = defaultlit2(l, r, true)
+ n.Left = l
+ n.Right = r
+ }
+ }
+
+ if et == TSTRING && n.Op == OADD {
+ // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ n.Op = OADDSTR
+
+ if l.Op == OADDSTR {
+ n.List.Set(l.List.Slice())
+ } else {
+ n.List.Set1(l)
+ }
+ if r.Op == OADDSTR {
+ n.List.AppendNodes(&r.List)
+ } else {
+ n.List.Append(r)
+ }
+ n.Left = nil
+ n.Right = nil
+ }
+
+ if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
+ if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
+ yyerror("division by zero")
+ n.Type = nil
+ return n
+ }
+ }
+
+ n.Type = t
+
+ case OBITNOT, ONEG, ONOT, OPLUS:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !okfor[n.Op][defaultType(t).Etype] {
+ yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
+ n.Type = nil
+ return n
+ }
+
+ n.Type = t
+
+ // exprs
+ case OADDR:
+ ok |= ctxExpr
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left.Type == nil {
+ n.Type = nil
+ return n
+ }
+
+ switch n.Left.Op {
+ case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
+ n.Op = OPTRLIT
+
+ default:
+ checklvalue(n.Left, "take the address of")
+ r := outervalue(n.Left)
+ if r.Op == ONAME {
+ if r.Orig != r {
+ Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+ }
+ r.Name.SetAddrtaken(true)
+ if r.Name.IsClosureVar() && !capturevarscomplete {
+ // Mark the original variable as Addrtaken so that capturevars
+ // knows not to pass it by value.
+ // But if the capturevars phase is complete, don't touch it,
+ // in case l.Name's containing function has not yet been compiled.
+ r.Name.Defn.Name.SetAddrtaken(true)
+ }
+ }
+ n.Left = defaultlit(n.Left, nil)
+ if n.Left.Type == nil {
+ n.Type = nil
+ return n
+ }
+ }
+
+ n.Type = types.NewPtr(n.Left.Type)
+
+ case OCOMPLIT:
+ ok |= ctxExpr
+ n = typecheckcomplit(n)
+ if n.Type == nil {
+ return n
+ }
+
+ case OXDOT, ODOT:
+ if n.Op == OXDOT {
+ n = adddot(n)
+ n.Op = ODOT
+ if n.Left == nil {
+ n.Type = nil
+ return n
+ }
+ }
+
+ n.Left = typecheck(n.Left, ctxExpr|ctxType)
+
+ n.Left = defaultlit(n.Left, nil)
+
+ t := n.Left.Type
+ if t == nil {
+ adderrorname(n)
+ n.Type = nil
+ return n
+ }
+
+ s := n.Sym
+
+ if n.Left.Op == OTYPE {
+ n = typecheckMethodExpr(n)
+ if n.Type == nil {
+ return n
+ }
+ ok = ctxExpr
+ break
+ }
+
+ if t.IsPtr() && !t.Elem().IsInterface() {
+ t = t.Elem()
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ n.Op = ODOTPTR
+ checkwidth(t)
+ }
+
+ if n.Sym.IsBlank() {
+ yyerror("cannot refer to blank field or method")
+ n.Type = nil
+ return n
+ }
+
+ if lookdot(n, t, 0) == nil {
+ // Legitimate field or method lookup failed, try to explain the error
+ switch {
+ case t.IsEmptyInterface():
+ yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
+
+ case t.IsPtr() && t.Elem().IsInterface():
+ // Pointer to interface is almost always a mistake.
+ yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
+
+ case lookdot(n, t, 1) != nil:
+ // Field or method matches by name, but it is not exported.
+ yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
+
+ default:
+ if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
+ yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
+ } else {
+ yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
+ }
+ }
+ n.Type = nil
+ return n
+ }
+
+ switch n.Op {
+ case ODOTINTER, ODOTMETH:
+ if top&ctxCallee != 0 {
+ ok |= ctxCallee
+ } else {
+ typecheckpartialcall(n, s)
+ ok |= ctxExpr
+ }
+
+ default:
+ ok |= ctxExpr
+ }
+
+ case ODOTTYPE:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsInterface() {
+ yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ if n.Right != nil {
+ n.Right = typecheck(n.Right, ctxType)
+ n.Type = n.Right.Type
+ n.Right = nil
+ if n.Type == nil {
+ return n
+ }
+ }
+
+ if n.Type != nil && !n.Type.IsInterface() {
+ var missing, have *types.Field
+ var ptr int
+ if !implements(n.Type, t, &missing, &have, &ptr) {
+ if have != nil && have.Sym == missing.Sym {
+ yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
+ } else if have != nil {
+ yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
+ }
+ n.Type = nil
+ return n
+ }
+ }
+
+ case OINDEX:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ n.Left = implicitstar(n.Left)
+ l := n.Left
+ n.Right = typecheck(n.Right, ctxExpr)
+ r := n.Right
+ t := l.Type
+ if t == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ switch t.Etype {
+ default:
+ yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
+ n.Type = nil
+ return n
+
+ case TSTRING, TARRAY, TSLICE:
+ n.Right = indexlit(n.Right)
+ if t.IsString() {
+ n.Type = types.Bytetype
+ } else {
+ n.Type = t.Elem()
+ }
+ why := "string"
+ if t.IsArray() {
+ why = "array"
+ } else if t.IsSlice() {
+ why = "slice"
+ }
+
+ if n.Right.Type != nil && !n.Right.Type.IsInteger() {
+ yyerror("non-integer %s index %v", why, n.Right)
+ break
+ }
+
+ if !n.Bounded() && Isconst(n.Right, CTINT) {
+ x := n.Right.Int64Val()
+ if x < 0 {
+ yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
+ } else if t.IsArray() && x >= t.NumElem() {
+ yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
+ } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.StringVal())) {
+ yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
+ } else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
+ yyerror("invalid %s index %v (index too large)", why, n.Right)
+ }
+ }
+
+ case TMAP:
+ n.Right = assignconv(n.Right, t.Key(), "map index")
+ n.Type = t.Elem()
+ n.Op = OINDEXMAP
+ n.ResetAux()
+ }
+
+ case ORECV:
+ ok |= ctxStmt | ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsChan() {
+ yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ if !t.ChanDir().CanRecv() {
+ yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ n.Type = t.Elem()
+
+ case OSEND:
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Right = typecheck(n.Right, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ t := n.Left.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsChan() {
+ yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ n.Right = assignconv(n.Right, t.Elem(), "send")
+ if n.Right.Type == nil {
+ n.Type = nil
+ return n
+ }
+ n.Type = nil
+
+ case OSLICEHEADER:
+ // Errors here are Fatalf instead of yyerror because only the compiler
+ // can construct an OSLICEHEADER node.
+ // Components used in OSLICEHEADER that are supplied by parsed source code
+ // have already been typechecked in e.g. OMAKESLICE earlier.
+ ok |= ctxExpr
+
+ t := n.Type
+ if t == nil {
+ Fatalf("no type specified for OSLICEHEADER")
+ }
+
+ if !t.IsSlice() {
+ Fatalf("invalid type %v for OSLICEHEADER", n.Type)
+ }
+
+ if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() {
+ Fatalf("need unsafe.Pointer for OSLICEHEADER")
+ }
+
+ if x := n.List.Len(); x != 2 {
+ Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
+ }
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ l := typecheck(n.List.First(), ctxExpr)
+ c := typecheck(n.List.Second(), ctxExpr)
+ l = defaultlit(l, types.Types[TINT])
+ c = defaultlit(c, types.Types[TINT])
+
+ if Isconst(l, CTINT) && l.Int64Val() < 0 {
+ Fatalf("len for OSLICEHEADER must be non-negative")
+ }
+
+ if Isconst(c, CTINT) && c.Int64Val() < 0 {
+ Fatalf("cap for OSLICEHEADER must be non-negative")
+ }
+
+ if Isconst(l, CTINT) && Isconst(c, CTINT) && l.Val().U.(*Mpint).Cmp(c.Val().U.(*Mpint)) > 0 {
+ Fatalf("len larger than cap for OSLICEHEADER")
+ }
+
+ n.List.SetFirst(l)
+ n.List.SetSecond(c)
+
+ case OMAKESLICECOPY:
+ // Errors here are Fatalf instead of yyerror because only the compiler
+ // can construct an OMAKESLICECOPY node.
+ // Components used in OMAKESLICECOPY that are supplied by parsed source code
+ // have already been typechecked in OMAKE and OCOPY earlier.
+ ok |= ctxExpr
+
+ t := n.Type
+
+ if t == nil {
+ Fatalf("no type specified for OMAKESLICECOPY")
+ }
+
+ if !t.IsSlice() {
+ Fatalf("invalid type %v for OMAKESLICECOPY", n.Type)
+ }
+
+ if n.Left == nil {
+ Fatalf("missing len argument for OMAKESLICECOPY")
+ }
+
+ if n.Right == nil {
+ Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+ }
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Right = typecheck(n.Right, ctxExpr)
+
+ n.Left = defaultlit(n.Left, types.Types[TINT])
+
+ if !n.Left.Type.IsInteger() && n.Type.Etype != TIDEAL {
+ yyerror("non-integer len argument in OMAKESLICECOPY")
+ }
+
+ if Isconst(n.Left, CTINT) {
+ if n.Left.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
+ Fatalf("len for OMAKESLICECOPY too large")
+ }
+ if n.Left.Int64Val() < 0 {
+ Fatalf("len for OMAKESLICECOPY must be non-negative")
+ }
+ }
+
+ case OSLICE, OSLICE3:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ low, high, max := n.SliceBounds()
+ hasmax := n.Op.IsSlice3()
+ low = typecheck(low, ctxExpr)
+ high = typecheck(high, ctxExpr)
+ max = typecheck(max, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ low = indexlit(low)
+ high = indexlit(high)
+ max = indexlit(max)
+ n.SetSliceBounds(low, high, max)
+ l := n.Left
+ if l.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if l.Type.IsArray() {
+ if !islvalue(n.Left) {
+ yyerror("invalid operation %v (slice of unaddressable value)", n)
+ n.Type = nil
+ return n
+ }
+
+ n.Left = nod(OADDR, n.Left, nil)
+ n.Left.SetImplicit(true)
+ n.Left = typecheck(n.Left, ctxExpr)
+ l = n.Left
+ }
+ t := l.Type
+ var tp *types.Type
+ if t.IsString() {
+ if hasmax {
+ yyerror("invalid operation %v (3-index slice of string)", n)
+ n.Type = nil
+ return n
+ }
+ n.Type = t
+ n.Op = OSLICESTR
+ } else if t.IsPtr() && t.Elem().IsArray() {
+ tp = t.Elem()
+ n.Type = types.NewSlice(tp.Elem())
+ dowidth(n.Type)
+ if hasmax {
+ n.Op = OSLICE3ARR
+ } else {
+ n.Op = OSLICEARR
+ }
+ } else if t.IsSlice() {
+ n.Type = t
+ } else {
+ yyerror("cannot slice %v (type %v)", l, t)
+ n.Type = nil
+ return n
+ }
+
+ if low != nil && !checksliceindex(l, low, tp) {
+ n.Type = nil
+ return n
+ }
+ if high != nil && !checksliceindex(l, high, tp) {
+ n.Type = nil
+ return n
+ }
+ if max != nil && !checksliceindex(l, max, tp) {
+ n.Type = nil
+ return n
+ }
+ if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
+ n.Type = nil
+ return n
+ }
+
+ // call and call like
+ case OCALL:
+ typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
+ n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee)
+ if n.Left.Diag() {
+ n.SetDiag(true)
+ }
+
+ l := n.Left
+
+ if l.Op == ONAME && l.SubOp() != 0 {
+ if n.IsDDD() && l.SubOp() != OAPPEND {
+ yyerror("invalid use of ... with builtin %v", l)
+ }
+
+ // builtin: OLEN, OCAP, etc.
+ n.Op = l.SubOp()
+ n.Left = n.Right
+ n.Right = nil
+ n = typecheck1(n, top)
+ return n
+ }
+
+ n.Left = defaultlit(n.Left, nil)
+ l = n.Left
+ if l.Op == OTYPE {
+ if n.IsDDD() {
+ if !l.Type.Broke() {
+ yyerror("invalid use of ... in type conversion to %v", l.Type)
+ }
+ n.SetDiag(true)
+ }
+
+ // pick off before type-checking arguments
+ ok |= ctxExpr
+
+ // turn CALL(type, arg) into CONV(arg) w/ type
+ n.Left = nil
+
+ n.Op = OCONV
+ n.Type = l.Type
+ if !onearg(n, "conversion to %v", l.Type) {
+ n.Type = nil
+ return n
+ }
+ n = typecheck1(n, top)
+ return n
+ }
+
+ typecheckargs(n)
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ checkwidth(t)
+
+ switch l.Op {
+ case ODOTINTER:
+ n.Op = OCALLINTER
+
+ case ODOTMETH:
+ n.Op = OCALLMETH
+
+ // typecheckaste was used here but there wasn't enough
+ // information further down the call chain to know if we
+ // were testing a method receiver for unexported fields.
+ // It isn't necessary, so just do a sanity check.
+ tp := t.Recv().Type
+
+ if l.Left == nil || !types.Identical(l.Left.Type, tp) {
+ Fatalf("method receiver")
+ }
+
+ default:
+ n.Op = OCALLFUNC
+ if t.Etype != TFUNC {
+ name := l.String()
+ if isBuiltinFuncName(name) && l.Name.Defn != nil {
+ // be more specific when the function
+ // name matches a predeclared function
+ yyerror("cannot call non-function %s (type %v), declared at %s",
+ name, t, linestr(l.Name.Defn.Pos))
+ } else {
+ yyerror("cannot call non-function %s (type %v)", name, t)
+ }
+ n.Type = nil
+ return n
+ }
+ }
+
+ typecheckaste(OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
+ ok |= ctxStmt
+ if t.NumResults() == 0 {
+ break
+ }
+ ok |= ctxExpr
+ if t.NumResults() == 1 {
+ n.Type = l.Type.Results().Field(0).Type
+
+ if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
+ // Emit code for runtime.getg() directly instead of calling function.
+ // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+ // so that the ordering pass can make sure to preserve the semantics of the original code
+ // (in particular, the exact time of the function call) by introducing temporaries.
+ // In this case, we know getg() always returns the same result within a given function
+ // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+ n.Op = OGETG
+ }
+
+ break
+ }
+
+ // multiple return
+ if top&(ctxMultiOK|ctxStmt) == 0 {
+ yyerror("multiple-value %v() in single-value context", l)
+ break
+ }
+
+ n.Type = l.Type.Results()
+
+ case OALIGNOF, OOFFSETOF, OSIZEOF:
+ ok |= ctxExpr
+ if !onearg(n, "%v", n.Op) {
+ n.Type = nil
+ return n
+ }
+ n.Type = types.Types[TUINTPTR]
+
+ case OCAP, OLEN:
+ ok |= ctxExpr
+ if !onearg(n, "%v", n.Op) {
+ n.Type = nil
+ return n
+ }
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ n.Left = implicitstar(n.Left)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+
+ var ok bool
+ if n.Op == OLEN {
+ ok = okforlen[t.Etype]
+ } else {
+ ok = okforcap[t.Etype]
+ }
+ if !ok {
+ yyerror("invalid argument %L for %v", l, n.Op)
+ n.Type = nil
+ return n
+ }
+
+ n.Type = types.Types[TINT]
+
+ case OREAL, OIMAG:
+ ok |= ctxExpr
+ if !onearg(n, "%v", n.Op) {
+ n.Type = nil
+ return n
+ }
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+
+ // Determine result type.
+ switch t.Etype {
+ case TIDEAL:
+ n.Type = types.UntypedFloat
+ case TCOMPLEX64:
+ n.Type = types.Types[TFLOAT32]
+ case TCOMPLEX128:
+ n.Type = types.Types[TFLOAT64]
+ default:
+ yyerror("invalid argument %L for %v", l, n.Op)
+ n.Type = nil
+ return n
+ }
+
+ case OCOMPLEX:
+ ok |= ctxExpr
+ typecheckargs(n)
+ if !twoarg(n) {
+ n.Type = nil
+ return n
+ }
+ l := n.Left
+ r := n.Right
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ l, r = defaultlit2(l, r, false)
+ if l.Type == nil || r.Type == nil {
+ n.Type = nil
+ return n
+ }
+ n.Left = l
+ n.Right = r
+
+ if !types.Identical(l.Type, r.Type) {
+ yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
+ n.Type = nil
+ return n
+ }
+
+ var t *types.Type
+ switch l.Type.Etype {
+ default:
+ yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
+ n.Type = nil
+ return n
+
+ case TIDEAL:
+ t = types.UntypedComplex
+
+ case TFLOAT32:
+ t = types.Types[TCOMPLEX64]
+
+ case TFLOAT64:
+ t = types.Types[TCOMPLEX128]
+ }
+ n.Type = t
+
+ case OCLOSE:
+ if !onearg(n, "%v", n.Op) {
+ n.Type = nil
+ return n
+ }
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ l := n.Left
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsChan() {
+ yyerror("invalid operation: %v (non-chan type %v)", n, t)
+ n.Type = nil
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ yyerror("invalid operation: %v (cannot close receive-only channel)", n)
+ n.Type = nil
+ return n
+ }
+
+ ok |= ctxStmt
+
+ case ODELETE:
+ ok |= ctxStmt
+ typecheckargs(n)
+ args := n.List
+ if args.Len() == 0 {
+ yyerror("missing arguments to delete")
+ n.Type = nil
+ return n
+ }
+
+ if args.Len() == 1 {
+ yyerror("missing second (key) argument to delete")
+ n.Type = nil
+ return n
+ }
+
+ if args.Len() != 2 {
+ yyerror("too many arguments to delete")
+ n.Type = nil
+ return n
+ }
+
+ l := args.First()
+ r := args.Second()
+ if l.Type != nil && !l.Type.IsMap() {
+ yyerror("first argument to delete must be map; have %L", l.Type)
+ n.Type = nil
+ return n
+ }
+
+ args.SetSecond(assignconv(r, l.Type.Key(), "delete"))
+
+ case OAPPEND:
+ ok |= ctxExpr
+ typecheckargs(n)
+ args := n.List
+ if args.Len() == 0 {
+ yyerror("missing arguments to append")
+ n.Type = nil
+ return n
+ }
+
+ t := args.First().Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+
+ n.Type = t
+ if !t.IsSlice() {
+ if Isconst(args.First(), CTNIL) {
+ yyerror("first argument to append must be typed slice; have untyped nil")
+ n.Type = nil
+ return n
+ }
+
+ yyerror("first argument to append must be slice; have %L", t)
+ n.Type = nil
+ return n
+ }
+
+ if n.IsDDD() {
+ if args.Len() == 1 {
+ yyerror("cannot use ... on first argument to append")
+ n.Type = nil
+ return n
+ }
+
+ if args.Len() != 2 {
+ yyerror("too many arguments to append")
+ n.Type = nil
+ return n
+ }
+
+ if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
+ args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING]))
+ break
+ }
+
+ args.SetSecond(assignconv(args.Second(), t.Orig, "append"))
+ break
+ }
+
+ as := args.Slice()[1:]
+ for i, n := range as {
+ if n.Type == nil {
+ continue
+ }
+ as[i] = assignconv(n, t.Elem(), "append")
+ checkwidth(as[i].Type) // ensure width is calculated for backend
+ }
+
+ case OCOPY:
+ ok |= ctxStmt | ctxExpr
+ typecheckargs(n)
+ if !twoarg(n) {
+ n.Type = nil
+ return n
+ }
+ n.Type = types.Types[TINT]
+ if n.Left.Type == nil || n.Right.Type == nil {
+ n.Type = nil
+ return n
+ }
+ n.Left = defaultlit(n.Left, nil)
+ n.Right = defaultlit(n.Right, nil)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ n.Type = nil
+ return n
+ }
+
+ // copy([]byte, string)
+ if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
+ if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
+ break
+ }
+ yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
+ n.Type = nil
+ return n
+ }
+
+ if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
+ if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
+ yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
+ } else if !n.Left.Type.IsSlice() {
+ yyerror("first argument to copy should be slice; have %L", n.Left.Type)
+ } else {
+ yyerror("second argument to copy should be slice or string; have %L", n.Right.Type)
+ }
+ n.Type = nil
+ return n
+ }
+
+ if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
+ yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
+ n.Type = nil
+ return n
+ }
+
+ case OCONV:
+ ok |= ctxExpr
+ checkwidth(n.Type) // ensure width is calculated for backend
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = convlit1(n.Left, n.Type, true, nil)
+ t := n.Left.Type
+ if t == nil || n.Type == nil {
+ n.Type = nil
+ return n
+ }
+ var why string
+ n.Op, why = convertop(n.Left.Op == OLITERAL, t, n.Type)
+ if n.Op == OXXX {
+ if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
+ yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
+ n.SetDiag(true)
+ }
+ n.Op = OCONV
+ n.Type = nil
+ return n
+ }
+
+ switch n.Op {
+ case OCONVNOP:
+ if t.Etype == n.Type.Etype {
+ switch t.Etype {
+ case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
+ // Floating point casts imply rounding and
+ // so the conversion must be kept.
+ n.Op = OCONV
+ }
+ }
+
+ // do not convert to []byte literal. See CL 125796.
+ // generated code and compiler memory footprint is better without it.
+ case OSTR2BYTES:
+ break
+
+ case OSTR2RUNES:
+ if n.Left.Op == OLITERAL {
+ n = stringtoruneslit(n)
+ }
+ }
+
+ case OMAKE:
+ ok |= ctxExpr
+ args := n.List.Slice()
+ if len(args) == 0 {
+ yyerror("missing argument to make")
+ n.Type = nil
+ return n
+ }
+
+ n.List.Set(nil)
+ l := args[0]
+ l = typecheck(l, ctxType)
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+
+ i := 1
+ switch t.Etype {
+ default:
+ yyerror("cannot make type %v", t)
+ n.Type = nil
+ return n
+
+ case TSLICE:
+ if i >= len(args) {
+ yyerror("missing len argument to make(%v)", t)
+ n.Type = nil
+ return n
+ }
+
+ l = args[i]
+ i++
+ l = typecheck(l, ctxExpr)
+ var r *Node
+ if i < len(args) {
+ r = args[i]
+ i++
+ r = typecheck(r, ctxExpr)
+ }
+
+ if l.Type == nil || (r != nil && r.Type == nil) {
+ n.Type = nil
+ return n
+ }
+ if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+ n.Type = nil
+ return n
+ }
+ if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
+ yyerror("len larger than cap in make(%v)", t)
+ n.Type = nil
+ return n
+ }
+
+ n.Left = l
+ n.Right = r
+ n.Op = OMAKESLICE
+
+ case TMAP:
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = typecheck(l, ctxExpr)
+ l = defaultlit(l, types.Types[TINT])
+ if l.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if !checkmake(t, "size", &l) {
+ n.Type = nil
+ return n
+ }
+ n.Left = l
+ } else {
+ n.Left = nodintconst(0)
+ }
+ n.Op = OMAKEMAP
+
+ case TCHAN:
+ l = nil
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = typecheck(l, ctxExpr)
+ l = defaultlit(l, types.Types[TINT])
+ if l.Type == nil {
+ n.Type = nil
+ return n
+ }
+ if !checkmake(t, "buffer", &l) {
+ n.Type = nil
+ return n
+ }
+ n.Left = l
+ } else {
+ n.Left = nodintconst(0)
+ }
+ n.Op = OMAKECHAN
+ }
+
+ if i < len(args) {
+ yyerror("too many arguments to make(%v)", t)
+ n.Op = OMAKE
+ n.Type = nil
+ return n
+ }
+
+ n.Type = t
+
+ case ONEW:
+ ok |= ctxExpr
+ args := n.List
+ if args.Len() == 0 {
+ yyerror("missing argument to new")
+ n.Type = nil
+ return n
+ }
+
+ l := args.First()
+ l = typecheck(l, ctxType)
+ t := l.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if args.Len() > 1 {
+ yyerror("too many arguments to new(%v)", t)
+ n.Type = nil
+ return n
+ }
+
+ n.Left = l
+ n.Type = types.NewPtr(t)
+
+ case OPRINT, OPRINTN:
+ ok |= ctxStmt
+ typecheckargs(n)
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ // Special case for print: int constant is int64, not int.
+ if Isconst(n1, CTINT) {
+ ls[i1] = defaultlit(ls[i1], types.Types[TINT64])
+ } else {
+ ls[i1] = defaultlit(ls[i1], nil)
+ }
+ }
+
+ case OPANIC:
+ ok |= ctxStmt
+ if !onearg(n, "panic") {
+ n.Type = nil
+ return n
+ }
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, types.Types[TINTER])
+ if n.Left.Type == nil {
+ n.Type = nil
+ return n
+ }
+
+ case ORECOVER:
+ ok |= ctxExpr | ctxStmt
+ if n.List.Len() != 0 {
+ yyerror("too many arguments to recover")
+ n.Type = nil
+ return n
+ }
+
+ n.Type = types.Types[TINTER]
+
+ case OCLOSURE:
+ ok |= ctxExpr
+ typecheckclosure(n, top)
+ if n.Type == nil {
+ return n
+ }
+
+ case OITAB:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ t := n.Left.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsInterface() {
+ Fatalf("OITAB of %v", t)
+ }
+ n.Type = types.NewPtr(types.Types[TUINTPTR])
+
+ case OIDATA:
+ // Whoever creates the OIDATA node must know a priori the concrete type at that moment,
+ // usually by just having checked the OITAB.
+ Fatalf("cannot typecheck interface data %v", n)
+
+ case OSPTR:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ t := n.Left.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ if !t.IsSlice() && !t.IsString() {
+ Fatalf("OSPTR of %v", t)
+ }
+ if t.IsString() {
+ n.Type = types.NewPtr(types.Types[TUINT8])
+ } else {
+ n.Type = types.NewPtr(t.Elem())
+ }
+
+ case OCLOSUREVAR:
+ ok |= ctxExpr
+
+ case OCFUNC:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Type = types.Types[TUINTPTR]
+
+ case OCONVNOP:
+ ok |= ctxExpr
+ n.Left = typecheck(n.Left, ctxExpr)
+
+ // statements
+ case OAS:
+ ok |= ctxStmt
+
+ typecheckas(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.Left.Op == ONAME && n.Left.IsAutoTmp() {
+ n.Left.Name.Defn = n
+ }
+
+ case OAS2:
+ ok |= ctxStmt
+ typecheckas2(n)
+
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ OEMPTY,
+ OGOTO,
+ OFALL,
+ OVARKILL,
+ OVARLIVE:
+ ok |= ctxStmt
+
+ case OLABEL:
+ ok |= ctxStmt
+ decldepth++
+ if n.Sym.IsBlank() {
+ // Empty identifier is valid but useless.
+ // Eliminate now to simplify life later.
+ // See issues 7538, 11589, 11593.
+ n.Op = OEMPTY
+ n.Left = nil
+ }
+
+ case ODEFER:
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
+ if !n.Left.Diag() {
+ checkdefergo(n)
+ }
+
+ case OGO:
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
+ checkdefergo(n)
+
+ case OFOR, OFORUNTIL:
+ ok |= ctxStmt
+ typecheckslice(n.Ninit.Slice(), ctxStmt)
+ decldepth++
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ if n.Left != nil {
+ t := n.Left.Type
+ if t != nil && !t.IsBoolean() {
+ yyerror("non-bool %L used as for condition", n.Left)
+ }
+ }
+ n.Right = typecheck(n.Right, ctxStmt)
+ if n.Op == OFORUNTIL {
+ typecheckslice(n.List.Slice(), ctxStmt)
+ }
+ typecheckslice(n.Nbody.Slice(), ctxStmt)
+ decldepth--
+
+ case OIF:
+ ok |= ctxStmt
+ typecheckslice(n.Ninit.Slice(), ctxStmt)
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ if n.Left != nil {
+ t := n.Left.Type
+ if t != nil && !t.IsBoolean() {
+ yyerror("non-bool %L used as if condition", n.Left)
+ }
+ }
+ typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Rlist.Slice(), ctxStmt)
+
+ case ORETURN:
+ ok |= ctxStmt
+ typecheckargs(n)
+ if Curfn == nil {
+ yyerror("return outside function")
+ n.Type = nil
+ return n
+ }
+
+ if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
+ break
+ }
+ typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
+
+ case ORETJMP:
+ ok |= ctxStmt
+
+ case OSELECT:
+ ok |= ctxStmt
+ typecheckselect(n)
+
+ case OSWITCH:
+ ok |= ctxStmt
+ typecheckswitch(n)
+
+ case ORANGE:
+ ok |= ctxStmt
+ typecheckrange(n)
+
+ case OTYPESW:
+ yyerror("use of .(type) outside type switch")
+ n.Type = nil
+ return n
+
+ case ODCLFUNC:
+ ok |= ctxStmt
+ typecheckfunc(n)
+
+ case ODCLCONST:
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxExpr)
+
+ case ODCLTYPE:
+ ok |= ctxStmt
+ n.Left = typecheck(n.Left, ctxType)
+ checkwidth(n.Left.Type)
+ }
+
+ t := n.Type
+ if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE {
+ switch t.Etype {
+ case TFUNC, // might have TANY; wait until it's called
+ TANY, TFORW, TIDEAL, TNIL, TBLANK:
+ break
+
+ default:
+ checkwidth(t)
+ }
+ }
+
+ evconst(n)
+ if n.Op == OTYPE && top&ctxType == 0 {
+ if !n.Type.Broke() {
+ yyerror("type %v is not an expression", n.Type)
+ }
+ n.Type = nil
+ return n
+ }
+
+ if top&(ctxExpr|ctxType) == ctxType && n.Op != OTYPE {
+ yyerror("%v is not a type", n)
+ n.Type = nil
+ return n
+ }
+
+ // TODO(rsc): simplify
+ if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 {
+ yyerror("%v used as value", n)
+ n.Type = nil
+ return n
+ }
+
+ if (top&ctxStmt != 0) && top&(ctxCallee|ctxExpr|ctxType) == 0 && ok&ctxStmt == 0 {
+ if !n.Diag() {
+ yyerror("%v evaluated but not used", n)
+ n.SetDiag(true)
+ }
+
+ n.Type = nil
+ return n
+ }
+
+ return n
+}
+
+func typecheckargs(n *Node) {
+ if n.List.Len() != 1 || n.IsDDD() {
+ typecheckslice(n.List.Slice(), ctxExpr)
+ return
+ }
+
+ typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK)
+ t := n.List.First().Type
+ if t == nil || !t.IsFuncArgStruct() {
+ return
+ }
+
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+
+ // Save n as n.Orig for fmt.go.
+ if n.Orig == n {
+ n.Orig = n.sepcopy()
+ }
+
+ as := nod(OAS2, nil, nil)
+ as.Rlist.AppendNodes(&n.List)
+
+ // If we're outside of function context, then this call will
+ // be executed during the generated init function. However,
+ // init.go hasn't yet created it. Instead, associate the
+ // temporary variables with dummyInitFn for now, and init.go
+ // will reassociate them later when it's appropriate.
+ static := Curfn == nil
+ if static {
+ Curfn = dummyInitFn
+ }
+ for _, f := range t.FieldSlice() {
+ t := temp(f.Type)
+ as.Ninit.Append(nod(ODCL, t, nil))
+ as.List.Append(t)
+ n.List.Append(t)
+ }
+ if static {
+ Curfn = nil
+ }
+
+ as = typecheck(as, ctxStmt)
+ n.Ninit.Append(as)
+}
+
+func checksliceindex(l *Node, r *Node, tp *types.Type) bool {
+ t := r.Type
+ if t == nil {
+ return false
+ }
+ if !t.IsInteger() {
+ yyerror("invalid slice index %v (type %v)", r, t)
+ return false
+ }
+
+ if r.Op == OLITERAL {
+ if r.Int64Val() < 0 {
+ yyerror("invalid slice index %v (index must be non-negative)", r)
+ return false
+ } else if tp != nil && tp.NumElem() >= 0 && r.Int64Val() > tp.NumElem() {
+ yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ return false
+ } else if Isconst(l, CTSTR) && r.Int64Val() > int64(len(l.StringVal())) {
+ yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.StringVal()))
+ return false
+ } else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
+ yyerror("invalid slice index %v (index too large)", r)
+ return false
+ }
+ }
+
+ return true
+}
+
+func checksliceconst(lo *Node, hi *Node) bool {
+ if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
+ yyerror("invalid slice index: %v > %v", lo, hi)
+ return false
+ }
+
+ return true
+}
+
+func checkdefergo(n *Node) {
+ what := "defer"
+ if n.Op == OGO {
+ what = "go"
+ }
+
+ switch n.Left.Op {
+ // ok
+ case OCALLINTER,
+ OCALLMETH,
+ OCALLFUNC,
+ OCLOSE,
+ OCOPY,
+ ODELETE,
+ OPANIC,
+ OPRINT,
+ OPRINTN,
+ ORECOVER:
+ return
+
+ case OAPPEND,
+ OCAP,
+ OCOMPLEX,
+ OIMAG,
+ OLEN,
+ OMAKE,
+ OMAKESLICE,
+ OMAKECHAN,
+ OMAKEMAP,
+ ONEW,
+ OREAL,
+ OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+ break
+ }
+ yyerrorl(n.Pos, "%s discards result of %v", what, n.Left)
+ return
+ }
+
+ // type is broken or missing, most likely a method call on a broken type
+ // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+ if n.Left.Type == nil || n.Left.Type.Broke() {
+ return
+ }
+
+ if !n.Diag() {
+ // The syntax made sure it was a call, so this must be
+ // a conversion.
+ n.SetDiag(true)
+ yyerrorl(n.Pos, "%s requires function call, not conversion", what)
+ }
+}
+
+// The result of implicitstar MUST be assigned back to n, e.g.
+// n.Left = implicitstar(n.Left)
+func implicitstar(n *Node) *Node {
+ // insert implicit * if needed for fixed array
+ t := n.Type
+ if t == nil || !t.IsPtr() {
+ return n
+ }
+ t = t.Elem()
+ if t == nil {
+ return n
+ }
+ if !t.IsArray() {
+ return n
+ }
+ n = nod(ODEREF, n, nil)
+ n.SetImplicit(true)
+ n = typecheck(n, ctxExpr)
+ return n
+}
+
+func onearg(n *Node, f string, args ...interface{}) bool {
+ if n.Left != nil {
+ return true
+ }
+ if n.List.Len() == 0 {
+ p := fmt.Sprintf(f, args...)
+ yyerror("missing argument to %s: %v", p, n)
+ return false
+ }
+
+ if n.List.Len() > 1 {
+ p := fmt.Sprintf(f, args...)
+ yyerror("too many arguments to %s: %v", p, n)
+ n.Left = n.List.First()
+ n.List.Set(nil)
+ return false
+ }
+
+ n.Left = n.List.First()
+ n.List.Set(nil)
+ return true
+}
+
+func twoarg(n *Node) bool {
+ if n.Left != nil {
+ return true
+ }
+ if n.List.Len() != 2 {
+ if n.List.Len() < 2 {
+ yyerror("not enough arguments in call to %v", n)
+ } else {
+ yyerror("too many arguments in call to %v", n)
+ }
+ return false
+ }
+ n.Left = n.List.First()
+ n.Right = n.List.Second()
+ n.List.Set(nil)
+ return true
+}
+
+func lookdot1(errnode *Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+ var r *types.Field
+ for _, f := range fs.Slice() {
+ if dostrcmp != 0 && f.Sym.Name == s.Name {
+ return f
+ }
+ if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+ return f
+ }
+ if f.Sym != s {
+ continue
+ }
+ if r != nil {
+ if errnode != nil {
+ yyerror("ambiguous selector %v", errnode)
+ } else if t.IsPtr() {
+ yyerror("ambiguous selector (%v).%v", t, s)
+ } else {
+ yyerror("ambiguous selector %v.%v", t, s)
+ }
+ break
+ }
+
+ r = f
+ }
+
+ return r
+}
+
+// typecheckMethodExpr checks selector expressions (ODOT) where the
+// base expression is a type expression (OTYPE).
+func typecheckMethodExpr(n *Node) (res *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckMethodExpr", n)(&res)
+ }
+
+ t := n.Left.Type
+
+ // Compute the method set for t.
+ var ms *types.Fields
+ if t.IsInterface() {
+ ms = t.Fields()
+ } else {
+ mt := methtype(t)
+ if mt == nil {
+ yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym)
+ n.Type = nil
+ return n
+ }
+ expandmeth(mt)
+ ms = mt.AllMethods()
+
+ // The method expression T.m requires a wrapper when T
+ // is different from m's declared receiver type. We
+ // normally generate these wrappers while writing out
+ // runtime type descriptors, which is always done for
+ // types declared at package scope. However, we need
+ // to make sure to generate wrappers for anonymous
+ // receiver types too.
+ if mt.Sym == nil {
+ addsignat(t)
+ }
+ }
+
+ s := n.Sym
+ m := lookdot1(n, s, t, ms, 0)
+ if m == nil {
+ if lookdot1(n, s, t, ms, 1) != nil {
+ yyerror("%v undefined (cannot refer to unexported method %v)", n, s)
+ } else if _, ambig := dotpath(s, t, nil, false); ambig {
+ yyerror("%v undefined (ambiguous selector)", n) // method or field
+ } else {
+ yyerror("%v undefined (type %v has no method %v)", n, t, s)
+ }
+ n.Type = nil
+ return n
+ }
+
+ if !isMethodApplicable(t, m) {
+ yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+ n.Type = nil
+ return n
+ }
+
+ n.Op = ONAME
+ if n.Name == nil {
+ n.Name = new(Name)
+ }
+ n.Right = newname(n.Sym)
+ n.Sym = methodSym(t, n.Sym)
+ n.Type = methodfunc(m.Type, n.Left.Type)
+ n.Xoffset = 0
+ n.SetClass(PFUNC)
+ // methodSym already marked n.Sym as a function.
+
+ // Issue 25065. Make sure that we emit the symbol for a local method.
+ if Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == localpkg) {
+ makefuncsym(n.Sym)
+ }
+
+ return n
+}
+
+// isMethodApplicable reports whether method m can be called on a
+// value of type t. This is necessary because we compute a single
+// method set for both T and *T, but some *T methods are not
+// applicable to T receivers.
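+//
+// For example, given
+//
+//	type T struct{}
+//	func (t *T) M() {}
+//
+// the method expression (*T).M is applicable, but T.M is not, because
+// M is in the method set of *T, not of T.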
+func isMethodApplicable(t *types.Type, m *types.Field) bool {
+ return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || isifacemethod(m.Type) || m.Embedded == 2
+}
+
+func derefall(t *types.Type) *types.Type {
+ for t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ return t
+}
+
+func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
+ s := n.Sym
+
+ dowidth(t)
+ var f1 *types.Field
+ if t.IsStruct() || t.IsInterface() {
+ f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
+ }
+
+ var f2 *types.Field
+ if n.Left.Type == t || n.Left.Type.Sym == nil {
+ mt := methtype(t)
+ if mt != nil {
+ f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
+ }
+ }
+
+ if f1 != nil {
+ if dostrcmp > 1 || f1.Broke() {
+ // Already in the process of diagnosing an error.
+ return f1
+ }
+ if f2 != nil {
+ yyerror("%v is both field and method", n.Sym)
+ }
+ if f1.Offset == BADWIDTH {
+ Fatalf("lookdot badwidth %v %p", f1, f1)
+ }
+ n.Xoffset = f1.Offset
+ n.Type = f1.Type
+ if t.IsInterface() {
+ if n.Left.Type.IsPtr() {
+ n.Left = nod(ODEREF, n.Left, nil) // implicitstar
+ n.Left.SetImplicit(true)
+ n.Left = typecheck(n.Left, ctxExpr)
+ }
+
+ n.Op = ODOTINTER
+ } else {
+ n.SetOpt(f1)
+ }
+
+ return f1
+ }
+
+ if f2 != nil {
+ if dostrcmp > 1 {
+ // Already in the process of diagnosing an error.
+ return f2
+ }
+ tt := n.Left.Type
+ dowidth(tt)
+ rcvr := f2.Type.Recv().Type
+ if !types.Identical(rcvr, tt) {
+ if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
+ checklvalue(n.Left, "call pointer method on")
+ n.Left = nod(OADDR, n.Left, nil)
+ n.Left.SetImplicit(true)
+ n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
+ n.Left = nod(ODEREF, n.Left, nil)
+ n.Left.SetImplicit(true)
+ n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
+ yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
+ for tt.IsPtr() {
+ // Stop one level early for method with pointer receiver.
+ if rcvr.IsPtr() && !tt.Elem().IsPtr() {
+ break
+ }
+ n.Left = nod(ODEREF, n.Left, nil)
+ n.Left.SetImplicit(true)
+ n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ tt = tt.Elem()
+ }
+ } else {
+ Fatalf("method mismatch: %v for %v", rcvr, tt)
+ }
+ }
+
+ pll := n
+ ll := n.Left
+ for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == ODEREF) {
+ pll = ll
+ ll = ll.Left
+ }
+ if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && asNode(ll.Type.Sym.Def) != nil && asNode(ll.Type.Sym.Def).Op == OTYPE {
+ // It is invalid to automatically dereference a named pointer type when selecting a method.
+ // Make n.Left == ll to clarify error message.
+ n.Left = ll
+ return nil
+ }
+
+ n.Sym = methodSym(n.Left.Type, f2.Sym)
+ n.Xoffset = f2.Offset
+ n.Type = f2.Type
+ n.Op = ODOTMETH
+
+ return f2
+ }
+
+ return nil
+}
+
+func nokeys(l Nodes) bool {
+ for _, n := range l.Slice() {
+ if n.Op == OKEY || n.Op == OSTRUCTKEY {
+ return false
+ }
+ }
+ return true
+}
+
+func hasddd(t *types.Type) bool {
+ for _, tl := range t.Fields().Slice() {
+ if tl.IsDDD() {
+ return true
+ }
+ }
+
+ return false
+}
+
+// typecheck assignment: type list = expression list
+func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) {
+ var t *types.Type
+ var i int
+
+ lno := lineno
+ defer func() { lineno = lno }()
+
+ if tstruct.Broke() {
+ return
+ }
+
+ var n *Node
+ if nl.Len() == 1 {
+ n = nl.First()
+ }
+
+ n1 := tstruct.NumFields()
+ n2 := nl.Len()
+ if !hasddd(tstruct) {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ } else {
+ if !isddd {
+ if n2 < n1-1 {
+ goto notenough
+ }
+ } else {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ }
+ }
+
+ i = 0
+ for _, tl := range tstruct.Fields().Slice() {
+ t = tl.Type
+ if tl.IsDDD() {
+ if isddd {
+ if i >= nl.Len() {
+ goto notenough
+ }
+ if nl.Len()-i > 1 {
+ goto toomany
+ }
+ n = nl.Index(i)
+ setlineno(n)
+ if n.Type != nil {
+ nl.SetIndex(i, assignconvfn(n, t, desc))
+ }
+ return
+ }
+
+ // TODO(mdempsky): Make into ... call with implicit slice.
+ for ; i < nl.Len(); i++ {
+ n = nl.Index(i)
+ setlineno(n)
+ if n.Type != nil {
+ nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
+ }
+ }
+ return
+ }
+
+ if i >= nl.Len() {
+ goto notenough
+ }
+ n = nl.Index(i)
+ setlineno(n)
+ if n.Type != nil {
+ nl.SetIndex(i, assignconvfn(n, t, desc))
+ }
+ i++
+ }
+
+ if i < nl.Len() {
+ goto toomany
+ }
+ if isddd {
+ if call != nil {
+ yyerror("invalid use of ... in call to %v", call)
+ } else {
+ yyerror("invalid use of ... in %v", op)
+ }
+ }
+ return
+
+notenough:
+ if n == nil || (!n.Diag() && n.Type != nil) {
+ details := errorDetails(nl, tstruct, isddd)
+ if call != nil {
+ // call is the expression being called, not the overall call.
+ // Method expressions have the form T.M, and the compiler has
+ // rewritten those to ONAME nodes but left T in Left.
+ if call.isMethodExpression() {
+ yyerror("not enough arguments in call to method expression %v%s", call, details)
+ } else {
+ yyerror("not enough arguments in call to %v%s", call, details)
+ }
+ } else {
+ yyerror("not enough arguments to %v%s", op, details)
+ }
+ if n != nil {
+ n.SetDiag(true)
+ }
+ }
+ return
+
+toomany:
+ details := errorDetails(nl, tstruct, isddd)
+ if call != nil {
+ yyerror("too many arguments in call to %v%s", call, details)
+ } else {
+ yyerror("too many arguments to %v%s", op, details)
+ }
+}
+
+func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
+ // If we don't know any type at a call site, let's suppress any return
+ // message signatures. See Issue https://golang.org/issues/19012.
+ if tstruct == nil {
+ return ""
+ }
+ // If any node has an unknown type, suppress it as well
+ for _, n := range nl.Slice() {
+ if n.Type == nil {
+ return ""
+ }
+ }
+ return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct)
+}
+
+// sigrepr is a type's representation to the outside world,
+// in string representations of return signatures,
+// e.g. in error messages about wrong arguments to return.
+func sigrepr(t *types.Type, isddd bool) string {
+ switch t {
+ case types.UntypedString:
+ return "string"
+ case types.UntypedBool:
+ return "bool"
+ }
+
+ if t.Etype == TIDEAL {
+ // "untyped number" is not commonly used
+ // outside of the compiler, so let's use "number".
+ // TODO(mdempsky): Revisit this.
+ return "number"
+ }
+
+ // Turn a []T... argument into ...T for a clearer error message.
+ if isddd {
+ if !t.IsSlice() {
+ Fatalf("bad type for ... argument: %v", t)
+ }
+ return "..." + t.Elem().String()
+ }
+ return t.String()
+}
+
+// sigerr returns the signature of the types at the call or return.
+func (nl Nodes) sigerr(isddd bool) string {
+ if nl.Len() < 1 {
+ return "()"
+ }
+
+ var typeStrings []string
+ for i, n := range nl.Slice() {
+ isdddArg := isddd && i == nl.Len()-1
+ typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
+ }
+
+ return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+}
+
+// type check composite
+func fielddup(name string, hash map[string]bool) {
+ if hash[name] {
+ yyerror("duplicate field name in struct literal: %s", name)
+ return
+ }
+ hash[name] = true
+}
+
+// iscomptype reports whether type t is a composite literal type.
+func iscomptype(t *types.Type) bool {
+ switch t.Etype {
+ case TARRAY, TSLICE, TSTRUCT, TMAP:
+ return true
+ default:
+ return false
+ }
+}
+
+// pushtype adds elided type information for composite literals if
+// appropriate, and returns the resulting expression.
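+//
+// For example, in []Point{{1, 2}} the untyped element {1, 2} is given
+// the elided type Point, and in []*Point{{1, 2}} it becomes the
+// implicit &Point{1, 2}.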
+func pushtype(n *Node, t *types.Type) *Node {
+ if n == nil || n.Op != OCOMPLIT || n.Right != nil {
+ return n
+ }
+
+ switch {
+ case iscomptype(t):
+ // For T, return T{...}.
+ n.Right = typenod(t)
+
+ case t.IsPtr() && iscomptype(t.Elem()):
+ // For *T, return &T{...}.
+ n.Right = typenod(t.Elem())
+
+ n = nodl(n.Pos, OADDR, n, nil)
+ n.SetImplicit(true)
+ }
+
+ return n
+}
+
+// The result of typecheckcomplit MUST be assigned back to n, e.g.
+// n.Left = typecheckcomplit(n.Left)
+func typecheckcomplit(n *Node) (res *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckcomplit", n)(&res)
+ }
+
+ lno := lineno
+ defer func() {
+ lineno = lno
+ }()
+
+ if n.Right == nil {
+ yyerrorl(n.Pos, "missing type in composite literal")
+ n.Type = nil
+ return n
+ }
+
+ // Save original node (including n.Right)
+ n.Orig = n.copy()
+
+ setlineno(n.Right)
+
+ // Need to handle [...]T arrays specially.
+ if n.Right.Op == OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ODDD {
+ n.Right.Right = typecheck(n.Right.Right, ctxType)
+ if n.Right.Right.Type == nil {
+ n.Type = nil
+ return n
+ }
+ elemType := n.Right.Right.Type
+
+ length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal")
+
+ n.Op = OARRAYLIT
+ n.Type = types.NewArray(elemType, length)
+ n.Right = nil
+ return n
+ }
+
+ n.Right = typecheck(n.Right, ctxType)
+ t := n.Right.Type
+ if t == nil {
+ n.Type = nil
+ return n
+ }
+ n.Type = t
+
+ switch t.Etype {
+ default:
+ yyerror("invalid composite literal type %v", t)
+ n.Type = nil
+
+ case TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal")
+ n.Op = OARRAYLIT
+ n.Right = nil
+
+ case TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal")
+ n.Op = OSLICELIT
+ n.Right = nodintconst(length)
+
+ case TMAP:
+ var cs constSet
+ for i3, l := range n.List.Slice() {
+ setlineno(l)
+ if l.Op != OKEY {
+ n.List.SetIndex(i3, typecheck(l, ctxExpr))
+ yyerror("missing key in map literal")
+ continue
+ }
+
+ r := l.Left
+ r = pushtype(r, t.Key())
+ r = typecheck(r, ctxExpr)
+ l.Left = assignconv(r, t.Key(), "map key")
+ cs.add(lineno, l.Left, "key", "map literal")
+
+ r = l.Right
+ r = pushtype(r, t.Elem())
+ r = typecheck(r, ctxExpr)
+ l.Right = assignconv(r, t.Elem(), "map value")
+ }
+
+ n.Op = OMAPLIT
+ n.Right = nil
+
+ case TSTRUCT:
+ // Need valid field offsets for Xoffset below.
+ dowidth(t)
+
+ errored := false
+ if n.List.Len() != 0 && nokeys(n.List) {
+ // simple list of variables
+ ls := n.List.Slice()
+ for i, n1 := range ls {
+ setlineno(n1)
+ n1 = typecheck(n1, ctxExpr)
+ ls[i] = n1
+ if i >= t.NumFields() {
+ if !errored {
+ yyerror("too many values in %v", n)
+ errored = true
+ }
+ continue
+ }
+
+ f := t.Field(i)
+ s := f.Sym
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != localpkg {
+ yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
+ // No pushtype allowed here. Must name fields for that.
+ n1 = assignconv(n1, f.Type, "field value")
+ n1 = nodSym(OSTRUCTKEY, n1, f.Sym)
+ n1.Xoffset = f.Offset
+ ls[i] = n1
+ }
+ if len(ls) < t.NumFields() {
+ yyerror("too few values in %v", n)
+ }
+ } else {
+ hash := make(map[string]bool)
+
+ // keyed list
+ ls := n.List.Slice()
+ for i, l := range ls {
+ setlineno(l)
+
+ if l.Op == OKEY {
+ key := l.Left
+
+ l.Op = OSTRUCTKEY
+ l.Left = l.Right
+ l.Right = nil
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if key.Sym == nil || key.Op == OXDOT || key.Sym.IsBlank() {
+ yyerror("invalid field name %v in struct initializer", key)
+ l.Left = typecheck(l.Left, ctxExpr)
+ continue
+ }
+
+ // Sym might have resolved to a name in another top-level
+ // package because of a dot import. Redirect to the correct
+ // sym before we do the lookup.
+ s := key.Sym
+ if s.Pkg != localpkg && types.IsExported(s.Name) {
+ s1 := lookup(s.Name)
+ if s1.Origpkg == s.Pkg {
+ s = s1
+ }
+ }
+ l.Sym = s
+ }
+
+ if l.Op != OSTRUCTKEY {
+ if !errored {
+ yyerror("mixture of field:value and value initializers")
+ errored = true
+ }
+ ls[i] = typecheck(ls[i], ctxExpr)
+ continue
+ }
+
+ f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
+ if f == nil {
+ if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym)
+ } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ yyerror("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t)
+ } else {
+ yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ }
+ continue
+ }
+ var f *types.Field
+ p, _ := dotpath(l.Sym, t, &f, true)
+ if p == nil || f.IsMethod() {
+ yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ continue
+ }
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, l.Sym.Name)
+ yyerror("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
+ continue
+ }
+ fielddup(f.Sym.Name, hash)
+ l.Xoffset = f.Offset
+
+ // No pushtype allowed here. Tried and rejected.
+ l.Left = typecheck(l.Left, ctxExpr)
+ l.Left = assignconv(l.Left, f.Type, "field value")
+ }
+ }
+
+ n.Op = OSTRUCTLIT
+ n.Right = nil
+ }
+
+ return n
+}
+
+// typecheckarraylit type-checks a sequence of slice/array literal elements.
+func typecheckarraylit(elemType *types.Type, bound int64, elts []*Node, ctx string) int64 {
+ // If there are key/value pairs, create a map to keep seen
+ // keys so we can check for duplicate indices.
+ var indices map[int64]bool
+ for _, elt := range elts {
+ if elt.Op == OKEY {
+ indices = make(map[int64]bool)
+ break
+ }
+ }
+
+ var key, length int64
+ for i, elt := range elts {
+ setlineno(elt)
+ vp := &elts[i]
+ if elt.Op == OKEY {
+ elt.Left = typecheck(elt.Left, ctxExpr)
+ key = indexconst(elt.Left)
+ if key < 0 {
+ if !elt.Left.Diag() {
+ if key == -2 {
+ yyerror("index too large")
+ } else {
+ yyerror("index must be non-negative integer constant")
+ }
+ elt.Left.SetDiag(true)
+ }
+ key = -(1 << 30) // stay negative for a while
+ }
+ vp = &elt.Right
+ }
+
+ r := *vp
+ r = pushtype(r, elemType)
+ r = typecheck(r, ctxExpr)
+ *vp = assignconv(r, elemType, ctx)
+
+ if key >= 0 {
+ if indices != nil {
+ if indices[key] {
+ yyerror("duplicate index in %s: %d", ctx, key)
+ } else {
+ indices[key] = true
+ }
+ }
+
+ if bound >= 0 && key >= bound {
+ yyerror("array index %d out of bounds [0:%d]", key, bound)
+ bound = -1
+ }
+ }
+
+ key++
+ if key > length {
+ length = key
+ }
+ }
+
+ return length
+}
+
+// visible reports whether sym is exported or locally defined.
+func visible(sym *types.Sym) bool {
+ return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == localpkg)
+}
+
+// nonexported reports whether sym is an unexported field.
+func nonexported(sym *types.Sym) bool {
+ return sym != nil && !types.IsExported(sym.Name)
+}
+
+// lvalue etc
+func islvalue(n *Node) bool {
+ switch n.Op {
+ case OINDEX:
+ if n.Left.Type != nil && n.Left.Type.IsArray() {
+ return islvalue(n.Left)
+ }
+ if n.Left.Type != nil && n.Left.Type.IsString() {
+ return false
+ }
+ fallthrough
+ case ODEREF, ODOTPTR, OCLOSUREVAR:
+ return true
+
+ case ODOT:
+ return islvalue(n.Left)
+
+ case ONAME:
+ if n.Class() == PFUNC {
+ return false
+ }
+ return true
+ }
+
+ return false
+}
+
+func checklvalue(n *Node, verb string) {
+ if !islvalue(n) {
+ yyerror("cannot %s %v", verb, n)
+ }
+}
+
+func checkassign(stmt *Node, n *Node) {
+ // Variables declared in ORANGE are assigned on every iteration.
+ if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
+ r := outervalue(n)
+ if r.Op == ONAME {
+ r.Name.SetAssigned(true)
+ if r.Name.IsClosureVar() {
+ r.Name.Defn.Name.SetAssigned(true)
+ }
+ }
+ }
+
+ if islvalue(n) {
+ return
+ }
+ if n.Op == OINDEXMAP {
+ n.SetIndexMapLValue(true)
+ return
+ }
+
+ // have already complained about n being invalid
+ if n.Type == nil {
+ return
+ }
+
+ switch {
+ case n.Op == ODOT && n.Left.Op == OINDEXMAP:
+ yyerror("cannot assign to struct field %v in map", n)
+ case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR:
+ yyerror("cannot assign to %v (strings are immutable)", n)
+ case n.Op == OLITERAL && n.Sym != nil && n.isGoConst():
+ yyerror("cannot assign to %v (declared const)", n)
+ default:
+ yyerror("cannot assign to %v", n)
+ }
+ n.Type = nil
+}
+
+func checkassignlist(stmt *Node, l Nodes) {
+ for _, n := range l.Slice() {
+ checkassign(stmt, n)
+ }
+}
+
+// samesafeexpr checks whether it is safe to reuse one of l and r
+// instead of computing both. samesafeexpr assumes that l and r are
+// used in the same statement or expression. In order for it to be
+// safe to reuse l or r, they must:
+// * be the same expression
+// * not have side-effects (no function calls, no channel ops);
+// however, panics are ok
+// * not cause inappropriate aliasing; e.g. two string to []byte
+// conversions, must result in two distinct slices
+//
+// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
+// as an lvalue (map assignment) and an rvalue (map access). This is
+// currently OK, since the only place samesafeexpr gets used on an
+// lvalue expression is for OSLICE and OAPPEND optimizations, and it
+// is correct in those settings.
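+//
+// For example, in m[k] = append(m[k], x) the two occurrences of m[k]
+// are samesafeexpr, so the compiler can reuse a single map access,
+// whereas two []byte(str) conversions are not, since each must yield
+// a distinct slice.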
+func samesafeexpr(l *Node, r *Node) bool {
+ if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
+ return false
+ }
+
+ switch l.Op {
+ case ONAME, OCLOSUREVAR:
+ return l == r
+
+ case ODOT, ODOTPTR:
+ return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
+
+ case ODEREF, OCONVNOP,
+ ONOT, OBITNOT, OPLUS, ONEG:
+ return samesafeexpr(l.Left, r.Left)
+
+ case OCONV:
+ // Some conversions can't be reused, such as []byte(str).
+ // Allow only numeric-ish types. This is a bit conservative.
+ return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
+
+ case OINDEX, OINDEXMAP,
+ OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+ return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+
+ case OLITERAL:
+ return eqval(l.Val(), r.Val())
+ }
+
+ return false
+}
+
+// type check assignment.
+// if this assignment is the definition of a var on the left side,
+// fill in the var's type.
+func typecheckas(n *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckas", n)(nil)
+ }
+
+ // delicate little dance.
+ // the definition of n may refer to this assignment
+ // as its definition, in which case it will call typecheckas.
+ // in that case, do not call typecheck back, or it will cycle.
+ // if the variable has a type (ntype) then typechecking
+ // will not look at defn, so it is okay (and desirable,
+ // so that the conversion below happens).
+ n.Left = resolve(n.Left)
+
+ if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
+ n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ }
+
+ // Use ctxMultiOK so we can emit an "N variables but M values" error
+ // to be consistent with typecheckas2 (#26616).
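+ // For example, with func f() (int, int), the statement x := f()
+ // reports "assignment mismatch: 1 variable but f returns 2 values".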
+ n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK)
+ checkassign(n, n.Left)
+ if n.Right != nil && n.Right.Type != nil {
+ if n.Right.Type.IsFuncArgStruct() {
+ yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
+ // Multi-value RHS isn't actually valid for OAS; nil out
+ // to indicate failed typechecking.
+ n.Right.Type = nil
+ } else if n.Left.Type != nil {
+ n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+ }
+ }
+
+ if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
+ n.Right = defaultlit(n.Right, nil)
+ n.Left.Type = n.Right.Type
+ }
+
+ // second half of dance.
+ // now that right is done, typecheck the left
+ // just to get it over with. see dance above.
+ n.SetTypecheck(1)
+
+ if n.Left.Typecheck() == 0 {
+ n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ }
+ if !n.Left.isBlank() {
+ checkwidth(n.Left.Type) // ensure width is calculated for backend
+ }
+}
+
+func checkassignto(src *types.Type, dst *Node) {
+ if op, why := assignop(src, dst.Type); op == OXXX {
+ yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+ return
+ }
+}
+
+func typecheckas2(n *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckas2", n)(nil)
+ }
+
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ // delicate little dance.
+ n1 = resolve(n1)
+ ls[i1] = n1
+
+ if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
+ ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
+ }
+ }
+
+ cl := n.List.Len()
+ cr := n.Rlist.Len()
+ if cl > 1 && cr == 1 {
+ n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
+ } else {
+ typecheckslice(n.Rlist.Slice(), ctxExpr)
+ }
+ checkassignlist(n, n.List)
+
+ var l *Node
+ var r *Node
+ if cl == cr {
+ // easy
+ ls := n.List.Slice()
+ rs := n.Rlist.Slice()
+ for il, nl := range ls {
+ nr := rs[il]
+ if nl.Type != nil && nr.Type != nil {
+ rs[il] = assignconv(nr, nl.Type, "assignment")
+ }
+ if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
+ rs[il] = defaultlit(rs[il], nil)
+ nl.Type = rs[il].Type
+ }
+ }
+
+ goto out
+ }
+
+ l = n.List.First()
+ r = n.Rlist.First()
+
+ // x,y,z = f()
+ if cr == 1 {
+ if r.Type == nil {
+ goto out
+ }
+ switch r.Op {
+ case OCALLMETH, OCALLINTER, OCALLFUNC:
+ if !r.Type.IsFuncArgStruct() {
+ break
+ }
+ cr = r.Type.NumFields()
+ if cr != cl {
+ goto mismatch
+ }
+ n.Op = OAS2FUNC
+ n.Right = r
+ n.Rlist.Set(nil)
+ for i, l := range n.List.Slice() {
+ f := r.Type.Field(i)
+ if f.Type != nil && l.Type != nil {
+ checkassignto(f.Type, l)
+ }
+ if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
+ l.Type = f.Type
+ }
+ }
+ goto out
+ }
+ }
+
+ // x, ok = y
+ if cl == 2 && cr == 1 {
+ if r.Type == nil {
+ goto out
+ }
+ switch r.Op {
+ case OINDEXMAP, ORECV, ODOTTYPE:
+ switch r.Op {
+ case OINDEXMAP:
+ n.Op = OAS2MAPR
+ case ORECV:
+ n.Op = OAS2RECV
+ case ODOTTYPE:
+ n.Op = OAS2DOTTYPE
+ r.Op = ODOTTYPE2
+ }
+ n.Right = r
+ n.Rlist.Set(nil)
+ if l.Type != nil {
+ checkassignto(r.Type, l)
+ }
+ if l.Name != nil && l.Name.Defn == n {
+ l.Type = r.Type
+ }
+ l := n.List.Second()
+ if l.Type != nil && !l.Type.IsBoolean() {
+ checkassignto(types.Types[TBOOL], l)
+ }
+ if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
+ l.Type = types.Types[TBOOL]
+ }
+ goto out
+ }
+ }
+
+mismatch:
+ switch r.Op {
+ default:
+ yyerror("assignment mismatch: %d variables but %d values", cl, cr)
+ case OCALLFUNC, OCALLMETH, OCALLINTER:
+ yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+ }
+
+ // second half of dance
+out:
+ n.SetTypecheck(1)
+ ls = n.List.Slice()
+ for i1, n1 := range ls {
+ if n1.Typecheck() == 0 {
+ ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
+ }
+ }
+}
+
+// type check function definition
+func typecheckfunc(n *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckfunc", n)(nil)
+ }
+
+ for _, ln := range n.Func.Dcl {
+ if ln.Op == ONAME && (ln.Class() == PPARAM || ln.Class() == PPARAMOUT) {
+ ln.Name.Decldepth = 1
+ }
+ }
+
+ n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
+ t := n.Func.Nname.Type
+ if t == nil {
+ return
+ }
+ n.Type = t
+ t.FuncType().Nname = asTypesNode(n.Func.Nname)
+ rcvr := t.Recv()
+ if rcvr != nil && n.Func.Shortname != nil {
+ m := addmethod(n.Func.Shortname, t, true, n.Func.Pragma&Nointerface != 0)
+ if m == nil {
+ return
+ }
+
+ n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
+ declare(n.Func.Nname, PFUNC)
+ }
+
+ if Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
+ makefuncsym(n.Func.Nname.Sym)
+ }
+}
+
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *Node) *Node {
+ if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
+ Fatalf("stringtoarraylit %v", n)
+ }
+
+ var l []*Node
+ i := 0
+ for _, r := range n.Left.StringVal() {
+ l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
+ i++
+ }
+
+ nn := nod(OCOMPLIT, nil, typenod(n.Type))
+ nn.List.Set(l)
+ nn = typecheck(nn, ctxExpr)
+ return nn
+}
+
+var mapqueue []*Node
+
+func checkMapKeys() {
+ for _, n := range mapqueue {
+ k := n.Type.MapType().Key
+ if !k.Broke() && !IsComparable(k) {
+ yyerrorl(n.Pos, "invalid map key type %v", k)
+ }
+ }
+ mapqueue = nil
+}
+
+func setUnderlying(t, underlying *types.Type) {
+ if underlying.Etype == TFORW {
+ // This type isn't computed yet; when it is, update n.
+ underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+ return
+ }
+
+ n := asNode(t.Nod)
+ ft := t.ForwardType()
+ cache := t.Cache
+
+ // TODO(mdempsky): Fix Type rekinding.
+ *t = *underlying
+
+ // Restore unnecessarily clobbered attributes.
+ t.Nod = asTypesNode(n)
+ t.Sym = n.Sym
+ if n.Name != nil {
+ t.Vargen = n.Name.Vargen
+ }
+ t.Cache = cache
+ t.SetDeferwidth(false)
+
+ // spec: "The declared type does not inherit any methods bound
+ // to the existing type, but the method set of an interface
+ // type [...] remains unchanged."
+ if !t.IsInterface() {
+ *t.Methods() = types.Fields{}
+ *t.AllMethods() = types.Fields{}
+ }
+
+ // Propagate go:notinheap pragma from the Name to the Type.
+ if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&NotInHeap != 0 {
+ t.SetNotInHeap(true)
+ }
+
+ // Update types waiting on this type.
+ for _, w := range ft.Copyto {
+ setUnderlying(w, t)
+ }
+
+ // Double-check use of type as embedded type.
+ if ft.Embedlineno.IsKnown() {
+ if t.IsPtr() || t.IsUnsafePtr() {
+ yyerrorl(ft.Embedlineno, "embedded type cannot be a pointer")
+ }
+ }
+}
+
+func typecheckdeftype(n *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckdeftype", n)(nil)
+ }
+
+ n.SetTypecheck(1)
+ n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
+ t := n.Name.Param.Ntype.Type
+ if t == nil {
+ n.SetDiag(true)
+ n.Type = nil
+ } else if n.Type == nil {
+ n.SetDiag(true)
+ } else {
+ // copy new type and clear fields
+ // that don't come along.
+ setUnderlying(n.Type, t)
+ }
+}
+
+func typecheckdef(n *Node) {
+ if enableTrace && trace {
+ defer tracePrint("typecheckdef", n)(nil)
+ }
+
+ lno := setlineno(n)
+
+ if n.Op == ONONAME {
+ if !n.Diag() {
+ n.SetDiag(true)
+
+ // Note: adderrorname looks for this string and
+ // adds context about the outer expression.
+ yyerrorl(lineno, "undefined: %v", n.Sym)
+ }
+ lineno = lno
+ return
+ }
+
+ if n.Walkdef() == 1 {
+ lineno = lno
+ return
+ }
+
+ typecheckdefstack = append(typecheckdefstack, n)
+ if n.Walkdef() == 2 {
+ flusherrors()
+ fmt.Printf("typecheckdef loop:")
+ for i := len(typecheckdefstack) - 1; i >= 0; i-- {
+ n := typecheckdefstack[i]
+ fmt.Printf(" %v", n.Sym)
+ }
+ fmt.Printf("\n")
+ Fatalf("typecheckdef loop")
+ }
+
+ n.SetWalkdef(2)
+
+ if n.Type != nil || n.Sym == nil { // builtin or no name
+ goto ret
+ }
+
+ switch n.Op {
+ default:
+ Fatalf("typecheckdef %v", n.Op)
+
+ case OLITERAL:
+ if n.Name.Param.Ntype != nil {
+ n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
+ n.Type = n.Name.Param.Ntype.Type
+ n.Name.Param.Ntype = nil
+ if n.Type == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ }
+
+ e := n.Name.Defn
+ n.Name.Defn = nil
+ if e == nil {
+ Dump("typecheckdef nil defn", n)
+ yyerrorl(n.Pos, "xxx")
+ }
+
+ e = typecheck(e, ctxExpr)
+ if e.Type == nil {
+ goto ret
+ }
+ if !e.isGoConst() {
+ if !e.Diag() {
+ if Isconst(e, CTNIL) {
+ yyerrorl(n.Pos, "const initializer cannot be nil")
+ } else {
+ yyerrorl(n.Pos, "const initializer %v is not a constant", e)
+ }
+ e.SetDiag(true)
+ }
+ goto ret
+ }
+
+ t := n.Type
+ if t != nil {
+ if !okforconst[t.Etype] {
+ yyerrorl(n.Pos, "invalid constant type %v", t)
+ goto ret
+ }
+
+ if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
+ yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
+ goto ret
+ }
+
+ e = convlit(e, t)
+ }
+
+ n.SetVal(e.Val())
+ n.Type = e.Type
+
+ case ONAME:
+ if n.Name.Param.Ntype != nil {
+ n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
+ n.Type = n.Name.Param.Ntype.Type
+ if n.Type == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ }
+
+ if n.Type != nil {
+ break
+ }
+ if n.Name.Defn == nil {
+ if n.SubOp() != 0 { // like OPRINTN
+ break
+ }
+ if nsavederrors+nerrors > 0 {
+ // Undefined variables in x := foo
+ // can leave x with a nil n.Name.Defn.
+ // If there are other errors anyway, don't
+ // bother adding to the noise.
+ break
+ }
+
+ Fatalf("var without type, init: %v", n.Sym)
+ }
+
+ if n.Name.Defn.Op == ONAME {
+ n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
+ n.Type = n.Name.Defn.Type
+ break
+ }
+
+ n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
+
+ case OTYPE:
+ if p := n.Name.Param; p.Alias() {
+ // Type alias declaration: Simply use the rhs type - no need
+ // to create a new type.
+ // If we have a syntax error, p.Ntype may be nil.
+ if p.Ntype != nil {
+ p.Ntype = typecheck(p.Ntype, ctxType)
+ n.Type = p.Ntype.Type
+ if n.Type == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ // For package-level type aliases, set n.Sym.Def so we can identify
+ // it as a type alias during export. See also #31959.
+ if n.Name.Curfn == nil {
+ n.Sym.Def = asTypesNode(p.Ntype)
+ }
+ }
+ break
+ }
+
+ // regular type declaration
+ defercheckwidth()
+ n.SetWalkdef(1)
+ setTypeNode(n, types.New(TFORW))
+ n.Type.Sym = n.Sym
+ nerrors0 := nerrors
+ typecheckdeftype(n)
+ if n.Type.Etype == TFORW && nerrors > nerrors0 {
+ // Something went wrong during type-checking,
+ // but it was reported. Silence future errors.
+ n.Type.SetBroke(true)
+ }
+ resumecheckwidth()
+ }
+
+ret:
+ if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() {
+ Fatalf("got %v for %v", n.Type, n)
+ }
+ last := len(typecheckdefstack) - 1
+ if typecheckdefstack[last] != n {
+ Fatalf("typecheckdefstack mismatch")
+ }
+ typecheckdefstack[last] = nil
+ typecheckdefstack = typecheckdefstack[:last]
+
+ lineno = lno
+ n.SetWalkdef(1)
+}
+
+func checkmake(t *types.Type, arg string, np **Node) bool {
+ n := *np
+ if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
+ yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+ return false
+ }
+
+ // Do range checks for constants before defaultlit
+ // to avoid redundant "constant NNN overflows int" errors.
+ switch consttype(n) {
+ case CTINT, CTRUNE, CTFLT, CTCPLX:
+ v := toint(n.Val()).U.(*Mpint)
+ if v.CmpInt64(0) < 0 {
+ yyerror("negative %s argument in make(%v)", arg, t)
+ return false
+ }
+ if v.Cmp(maxintval[TINT]) > 0 {
+ yyerror("%s argument too large in make(%v)", arg, t)
+ return false
+ }
+ }
+
+ // defaultlit is necessary for non-constants too: n might be 1.1<<k.
+ // TODO(gri) The length argument requirements for (array/slice) make
+ // are the same as for index expressions. Factor the code better;
+ // for instance, indexlit might be called here and incorporate some
+ // of the bounds checks done for make.
+ n = defaultlit(n, types.Types[TINT])
+ *np = n
+
+ return true
+}
+
+func markbreak(n *Node, implicit *Node) {
+ if n == nil {
+ return
+ }
+
+ switch n.Op {
+ case OBREAK:
+ if n.Sym == nil {
+ if implicit != nil {
+ implicit.SetHasBreak(true)
+ }
+ } else {
+ lab := asNode(n.Sym.Label)
+ if lab != nil {
+ lab.SetHasBreak(true)
+ }
+ }
+ case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
+ implicit = n
+ fallthrough
+ default:
+ markbreak(n.Left, implicit)
+ markbreak(n.Right, implicit)
+ markbreaklist(n.Ninit, implicit)
+ markbreaklist(n.Nbody, implicit)
+ markbreaklist(n.List, implicit)
+ markbreaklist(n.Rlist, implicit)
+ }
+}
+
+func markbreaklist(l Nodes, implicit *Node) {
+ s := l.Slice()
+ for i := 0; i < len(s); i++ {
+ n := s[i]
+ if n == nil {
+ continue
+ }
+ if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
+ switch n.Name.Defn.Op {
+ case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE:
+ n.Sym.Label = asTypesNode(n.Name.Defn)
+ markbreak(n.Name.Defn, n.Name.Defn)
+ n.Sym.Label = nil
+ i++
+ continue
+ }
+ }
+
+ markbreak(n, implicit)
+ }
+}
+
+// isterminating reports whether the Nodes list ends with a terminating statement.
+func (l Nodes) isterminating() bool {
+ s := l.Slice()
+ c := len(s)
+ if c == 0 {
+ return false
+ }
+ return s[c-1].isterminating()
+}
+
+// Isterminating reports whether the node n, the last one in a
+// statement list, is a terminating statement.
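+//
+// For example, a body ending in "for {}" (no condition, no break) or
+// in a return statement is terminating; one ending in
+// "if cond { return }" with no else branch is not.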
+func (n *Node) isterminating() bool {
+ switch n.Op {
+ // NOTE: OLABEL is treated as a separate statement,
+ // not a separate prefix, so skipping to the last statement
+ // in the block handles the labeled statement case by
+ // skipping over the label. No case OLABEL here.
+
+ case OBLOCK:
+ return n.List.isterminating()
+
+ case OGOTO, ORETURN, ORETJMP, OPANIC, OFALL:
+ return true
+
+ case OFOR, OFORUNTIL:
+ if n.Left != nil {
+ return false
+ }
+ if n.HasBreak() {
+ return false
+ }
+ return true
+
+ case OIF:
+ return n.Nbody.isterminating() && n.Rlist.isterminating()
+
+ case OSWITCH, OTYPESW, OSELECT:
+ if n.HasBreak() {
+ return false
+ }
+ def := false
+ for _, n1 := range n.List.Slice() {
+ if !n1.Nbody.isterminating() {
+ return false
+ }
+ if n1.List.Len() == 0 { // default
+ def = true
+ }
+ }
+
+ if n.Op != OSELECT && !def {
+ return false
+ }
+ return true
+ }
+
+ return false
+}
+
+// checkreturn makes sure that fn terminates appropriately.
+func checkreturn(fn *Node) {
+ if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 {
+ markbreaklist(fn.Nbody, nil)
+ if !fn.Nbody.isterminating() {
+ yyerrorl(fn.Func.Endlineno, "missing return at end of function")
+ }
+ }
+}
+
+func deadcode(fn *Node) {
+ deadcodeslice(fn.Nbody)
+ deadcodefn(fn)
+}
+
+func deadcodefn(fn *Node) {
+ if fn.Nbody.Len() == 0 {
+ return
+ }
+
+ for _, n := range fn.Nbody.Slice() {
+ if n.Ninit.Len() > 0 {
+ return
+ }
+ switch n.Op {
+ case OIF:
+ if !Isconst(n.Left, CTBOOL) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
+ return
+ }
+ case OFOR:
+ if !Isconst(n.Left, CTBOOL) || n.Left.BoolVal() {
+ return
+ }
+ default:
+ return
+ }
+ }
+
+ fn.Nbody.Set([]*Node{nod(OEMPTY, nil, nil)})
+}
+
+func deadcodeslice(nn Nodes) {
+ var lastLabel = -1
+ for i, n := range nn.Slice() {
+ if n != nil && n.Op == OLABEL {
+ lastLabel = i
+ }
+ }
+ for i, n := range nn.Slice() {
+ // cut is set to true when all nodes after the i'th position
+ // should be removed; in other words, it marks the whole
+ // "tail" of the slice as dead.
+ cut := false
+ if n == nil {
+ continue
+ }
+ if n.Op == OIF {
+ n.Left = deadcodeexpr(n.Left)
+ if Isconst(n.Left, CTBOOL) {
+ var body Nodes
+ if n.Left.BoolVal() {
+ n.Rlist = Nodes{}
+ body = n.Nbody
+ } else {
+ n.Nbody = Nodes{}
+ body = n.Rlist
+ }
+ // If "then" or "else" branch ends with panic or return statement,
+ // it is safe to remove all statements after this node.
+ // isterminating is not used to avoid goto-related complications.
+ // We must be careful not to deadcode-remove labels, as they
+ // might be the target of a goto. See issue 28616.
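+ // For example, after "if true { return }", any statements that
+ // follow in the same list are unreachable and are cut (unless a
+ // label appears later in the list).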
+ if body := body.Slice(); len(body) != 0 {
+ switch body[(len(body) - 1)].Op {
+ case ORETURN, ORETJMP, OPANIC:
+ if i > lastLabel {
+ cut = true
+ }
+ }
+ }
+ }
+ }
+
+ deadcodeslice(n.Ninit)
+ deadcodeslice(n.Nbody)
+ deadcodeslice(n.List)
+ deadcodeslice(n.Rlist)
+ if cut {
+ *nn.slice = nn.Slice()[:i+1]
+ break
+ }
+ }
+}
+
+func deadcodeexpr(n *Node) *Node {
+ // Perform dead-code elimination on short-circuited boolean
+ // expressions involving constants with the intent of
+ // producing a constant 'if' condition.
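+ //
+ // For example, if debug is a constant false, a condition such as
+ // "debug && expensiveCheck()" folds to the constant false, and the
+ // enclosing if body can then be removed by deadcodeslice.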
+ switch n.Op {
+ case OANDAND:
+ n.Left = deadcodeexpr(n.Left)
+ n.Right = deadcodeexpr(n.Right)
+ if Isconst(n.Left, CTBOOL) {
+ if n.Left.BoolVal() {
+ return n.Right // true && x => x
+ } else {
+ return n.Left // false && x => false
+ }
+ }
+ case OOROR:
+ n.Left = deadcodeexpr(n.Left)
+ n.Right = deadcodeexpr(n.Right)
+ if Isconst(n.Left, CTBOOL) {
+ if n.Left.BoolVal() {
+ return n.Left // true || x => true
+ } else {
+ return n.Right // false || x => x
+ }
+ }
+ }
+ return n
+}
+
+// setTypeNode sets n to an OTYPE node representing t.
+func setTypeNode(n *Node, t *types.Type) {
+ n.Op = OTYPE
+ n.Type = t
+ n.Type.Nod = asTypesNode(n)
+}
+
+// getIotaValue returns the current value for "iota",
+// or -1 if not within a ConstSpec.
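+//
+// For example, while typechecking
+//
+//	const (
+//		a = iota // getIotaValue() == 0
+//		b        // getIotaValue() == 1
+//	)
+//
+// the top of the typecheckdefstack supplies the current value.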
+func getIotaValue() int64 {
+ if i := len(typecheckdefstack); i > 0 {
+ if x := typecheckdefstack[i-1]; x.Op == OLITERAL {
+ return x.Iota()
+ }
+ }
+
+ if Curfn != nil && Curfn.Iota() >= 0 {
+ return Curfn.Iota()
+ }
+
+ return -1
+}
+
+// curpkg returns the current package, based on Curfn.
+func curpkg() *types.Pkg {
+ fn := Curfn
+ if fn == nil {
+ // Initialization expressions for package-scope variables.
+ return localpkg
+ }
+
+ // TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
+ // Curfn, rather than mixing them.
+ if fn.Op == ODCLFUNC {
+ fn = fn.Func.Nname
+ }
+
+ return fnpkg(fn)
+}
diff --git a/src/cmd/compile/internal/gc/types.go b/src/cmd/compile/internal/gc/types.go
new file mode 100644
index 0000000..748f845
--- /dev/null
+++ b/src/cmd/compile/internal/gc/types.go
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+)
+
+// convenience constants
+const (
+ Txxx = types.Txxx
+
+ TINT8 = types.TINT8
+ TUINT8 = types.TUINT8
+ TINT16 = types.TINT16
+ TUINT16 = types.TUINT16
+ TINT32 = types.TINT32
+ TUINT32 = types.TUINT32
+ TINT64 = types.TINT64
+ TUINT64 = types.TUINT64
+ TINT = types.TINT
+ TUINT = types.TUINT
+ TUINTPTR = types.TUINTPTR
+
+ TCOMPLEX64 = types.TCOMPLEX64
+ TCOMPLEX128 = types.TCOMPLEX128
+
+ TFLOAT32 = types.TFLOAT32
+ TFLOAT64 = types.TFLOAT64
+
+ TBOOL = types.TBOOL
+
+ TPTR = types.TPTR
+ TFUNC = types.TFUNC
+ TSLICE = types.TSLICE
+ TARRAY = types.TARRAY
+ TSTRUCT = types.TSTRUCT
+ TCHAN = types.TCHAN
+ TMAP = types.TMAP
+ TINTER = types.TINTER
+ TFORW = types.TFORW
+ TANY = types.TANY
+ TSTRING = types.TSTRING
+ TUNSAFEPTR = types.TUNSAFEPTR
+
+ // pseudo-types for literals
+ TIDEAL = types.TIDEAL
+ TNIL = types.TNIL
+ TBLANK = types.TBLANK
+
+ // pseudo-types for frame layout
+ TFUNCARGS = types.TFUNCARGS
+ TCHANARGS = types.TCHANARGS
+
+ NTYPE = types.NTYPE
+)
diff --git a/src/cmd/compile/internal/gc/types_acc.go b/src/cmd/compile/internal/gc/types_acc.go
new file mode 100644
index 0000000..7240f72
--- /dev/null
+++ b/src/cmd/compile/internal/gc/types_acc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements conversions between *types.Node and *Node.
+// TODO(gri) try to eliminate these soon
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "unsafe"
+)
+
+func asNode(n *types.Node) *Node { return (*Node)(unsafe.Pointer(n)) }
+func asTypesNode(n *Node) *types.Node { return (*types.Node)(unsafe.Pointer(n)) }
diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go
new file mode 100644
index 0000000..ff8cabd
--- /dev/null
+++ b/src/cmd/compile/internal/gc/universe.go
@@ -0,0 +1,453 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(gri) This file should probably become part of package types.
+
+package gc
+
+import "cmd/compile/internal/types"
+
+// builtinpkg is a fake package that declares the universe block.
+var builtinpkg *types.Pkg
+
+var basicTypes = [...]struct {
+ name string
+ etype types.EType
+}{
+ {"int8", TINT8},
+ {"int16", TINT16},
+ {"int32", TINT32},
+ {"int64", TINT64},
+ {"uint8", TUINT8},
+ {"uint16", TUINT16},
+ {"uint32", TUINT32},
+ {"uint64", TUINT64},
+ {"float32", TFLOAT32},
+ {"float64", TFLOAT64},
+ {"complex64", TCOMPLEX64},
+ {"complex128", TCOMPLEX128},
+ {"bool", TBOOL},
+ {"string", TSTRING},
+}
+
+var typedefs = [...]struct {
+ name string
+ etype types.EType
+ sameas32 types.EType
+ sameas64 types.EType
+}{
+ {"int", TINT, TINT32, TINT64},
+ {"uint", TUINT, TUINT32, TUINT64},
+ {"uintptr", TUINTPTR, TUINT32, TUINT64},
+}
+
+var builtinFuncs = [...]struct {
+ name string
+ op Op
+}{
+ {"append", OAPPEND},
+ {"cap", OCAP},
+ {"close", OCLOSE},
+ {"complex", OCOMPLEX},
+ {"copy", OCOPY},
+ {"delete", ODELETE},
+ {"imag", OIMAG},
+ {"len", OLEN},
+ {"make", OMAKE},
+ {"new", ONEW},
+ {"panic", OPANIC},
+ {"print", OPRINT},
+ {"println", OPRINTN},
+ {"real", OREAL},
+ {"recover", ORECOVER},
+}
+
+// isBuiltinFuncName reports whether name matches a builtin function
+// name.
+func isBuiltinFuncName(name string) bool {
+ for _, fn := range &builtinFuncs {
+ if fn.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+var unsafeFuncs = [...]struct {
+ name string
+ op Op
+}{
+ {"Alignof", OALIGNOF},
+ {"Offsetof", OOFFSETOF},
+ {"Sizeof", OSIZEOF},
+}
+
+// initUniverse initializes the universe block.
+func initUniverse() {
+ lexinit()
+ typeinit()
+ lexinit1()
+}
+
+// lexinit initializes known symbols and the basic types.
+func lexinit() {
+ for _, s := range &basicTypes {
+ etype := s.etype
+ if int(etype) >= len(types.Types) {
+ Fatalf("lexinit: %s bad etype", s.name)
+ }
+ s2 := builtinpkg.Lookup(s.name)
+ t := types.Types[etype]
+ if t == nil {
+ t = types.New(etype)
+ t.Sym = s2
+ if etype != TANY && etype != TSTRING {
+ dowidth(t)
+ }
+ types.Types[etype] = t
+ }
+ s2.Def = asTypesNode(typenod(t))
+ asNode(s2.Def).Name = new(Name)
+ }
+
+ for _, s := range &builtinFuncs {
+ s2 := builtinpkg.Lookup(s.name)
+ s2.Def = asTypesNode(newname(s2))
+ asNode(s2.Def).SetSubOp(s.op)
+ }
+
+ for _, s := range &unsafeFuncs {
+ s2 := unsafepkg.Lookup(s.name)
+ s2.Def = asTypesNode(newname(s2))
+ asNode(s2.Def).SetSubOp(s.op)
+ }
+
+ types.UntypedString = types.New(TSTRING)
+ types.UntypedBool = types.New(TBOOL)
+ types.Types[TANY] = types.New(TANY)
+
+ s := builtinpkg.Lookup("true")
+ s.Def = asTypesNode(nodbool(true))
+ asNode(s.Def).Sym = lookup("true")
+ asNode(s.Def).Name = new(Name)
+ asNode(s.Def).Type = types.UntypedBool
+
+ s = builtinpkg.Lookup("false")
+ s.Def = asTypesNode(nodbool(false))
+ asNode(s.Def).Sym = lookup("false")
+ asNode(s.Def).Name = new(Name)
+ asNode(s.Def).Type = types.UntypedBool
+
+ s = lookup("_")
+ s.Block = -100
+ s.Def = asTypesNode(newname(s))
+ types.Types[TBLANK] = types.New(TBLANK)
+ asNode(s.Def).Type = types.Types[TBLANK]
+ nblank = asNode(s.Def)
+
+ s = builtinpkg.Lookup("_")
+ s.Block = -100
+ s.Def = asTypesNode(newname(s))
+ types.Types[TBLANK] = types.New(TBLANK)
+ asNode(s.Def).Type = types.Types[TBLANK]
+
+ types.Types[TNIL] = types.New(TNIL)
+ s = builtinpkg.Lookup("nil")
+ var v Val
+ v.U = new(NilVal)
+ s.Def = asTypesNode(nodlit(v))
+ asNode(s.Def).Sym = s
+ asNode(s.Def).Name = new(Name)
+
+ s = builtinpkg.Lookup("iota")
+ s.Def = asTypesNode(nod(OIOTA, nil, nil))
+ asNode(s.Def).Sym = s
+ asNode(s.Def).Name = new(Name)
+}
+
+func typeinit() {
+ if Widthptr == 0 {
+ Fatalf("typeinit before betypeinit")
+ }
+
+ for et := types.EType(0); et < NTYPE; et++ {
+ simtype[et] = et
+ }
+
+ types.Types[TPTR] = types.New(TPTR)
+ dowidth(types.Types[TPTR])
+
+ t := types.New(TUNSAFEPTR)
+ types.Types[TUNSAFEPTR] = t
+ t.Sym = unsafepkg.Lookup("Pointer")
+ t.Sym.Def = asTypesNode(typenod(t))
+ asNode(t.Sym.Def).Name = new(Name)
+ dowidth(types.Types[TUNSAFEPTR])
+
+ for et := TINT8; et <= TUINT64; et++ {
+ isInt[et] = true
+ }
+ isInt[TINT] = true
+ isInt[TUINT] = true
+ isInt[TUINTPTR] = true
+
+ isFloat[TFLOAT32] = true
+ isFloat[TFLOAT64] = true
+
+ isComplex[TCOMPLEX64] = true
+ isComplex[TCOMPLEX128] = true
+
+ // initialize okfor
+ for et := types.EType(0); et < NTYPE; et++ {
+ if isInt[et] || et == TIDEAL {
+ okforeq[et] = true
+ okforcmp[et] = true
+ okforarith[et] = true
+ okforadd[et] = true
+ okforand[et] = true
+ okforconst[et] = true
+ issimple[et] = true
+ minintval[et] = new(Mpint)
+ maxintval[et] = new(Mpint)
+ }
+
+ if isFloat[et] {
+ okforeq[et] = true
+ okforcmp[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ okforconst[et] = true
+ issimple[et] = true
+ minfltval[et] = newMpflt()
+ maxfltval[et] = newMpflt()
+ }
+
+ if isComplex[et] {
+ okforeq[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ okforconst[et] = true
+ issimple[et] = true
+ }
+ }
+
+ issimple[TBOOL] = true
+
+ okforadd[TSTRING] = true
+
+ okforbool[TBOOL] = true
+
+ okforcap[TARRAY] = true
+ okforcap[TCHAN] = true
+ okforcap[TSLICE] = true
+
+ okforconst[TBOOL] = true
+ okforconst[TSTRING] = true
+
+ okforlen[TARRAY] = true
+ okforlen[TCHAN] = true
+ okforlen[TMAP] = true
+ okforlen[TSLICE] = true
+ okforlen[TSTRING] = true
+
+ okforeq[TPTR] = true
+ okforeq[TUNSAFEPTR] = true
+ okforeq[TINTER] = true
+ okforeq[TCHAN] = true
+ okforeq[TSTRING] = true
+ okforeq[TBOOL] = true
+ okforeq[TMAP] = true // nil only; refined in typecheck
+ okforeq[TFUNC] = true // nil only; refined in typecheck
+ okforeq[TSLICE] = true // nil only; refined in typecheck
+ okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck
+ okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+
+ okforcmp[TSTRING] = true
+
+ var i int
+ for i = 0; i < len(okfor); i++ {
+ okfor[i] = okfornone[:]
+ }
+
+ // binary
+ okfor[OADD] = okforadd[:]
+ okfor[OAND] = okforand[:]
+ okfor[OANDAND] = okforbool[:]
+ okfor[OANDNOT] = okforand[:]
+ okfor[ODIV] = okforarith[:]
+ okfor[OEQ] = okforeq[:]
+ okfor[OGE] = okforcmp[:]
+ okfor[OGT] = okforcmp[:]
+ okfor[OLE] = okforcmp[:]
+ okfor[OLT] = okforcmp[:]
+ okfor[OMOD] = okforand[:]
+ okfor[OMUL] = okforarith[:]
+ okfor[ONE] = okforeq[:]
+ okfor[OOR] = okforand[:]
+ okfor[OOROR] = okforbool[:]
+ okfor[OSUB] = okforarith[:]
+ okfor[OXOR] = okforand[:]
+ okfor[OLSH] = okforand[:]
+ okfor[ORSH] = okforand[:]
+
+ // unary
+ okfor[OBITNOT] = okforand[:]
+ okfor[ONEG] = okforarith[:]
+ okfor[ONOT] = okforbool[:]
+ okfor[OPLUS] = okforarith[:]
+
+ // special
+ okfor[OCAP] = okforcap[:]
+ okfor[OLEN] = okforlen[:]
+
+ // comparison
+ iscmp[OLT] = true
+ iscmp[OGT] = true
+ iscmp[OGE] = true
+ iscmp[OLE] = true
+ iscmp[OEQ] = true
+ iscmp[ONE] = true
+
+ maxintval[TINT8].SetString("0x7f")
+ minintval[TINT8].SetString("-0x80")
+ maxintval[TINT16].SetString("0x7fff")
+ minintval[TINT16].SetString("-0x8000")
+ maxintval[TINT32].SetString("0x7fffffff")
+ minintval[TINT32].SetString("-0x80000000")
+ maxintval[TINT64].SetString("0x7fffffffffffffff")
+ minintval[TINT64].SetString("-0x8000000000000000")
+
+ maxintval[TUINT8].SetString("0xff")
+ maxintval[TUINT16].SetString("0xffff")
+ maxintval[TUINT32].SetString("0xffffffff")
+ maxintval[TUINT64].SetString("0xffffffffffffffff")
+
+ // f is valid float if min < f < max. (min and max are not themselves valid.)
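+ // For float32, 33554431p103 = (2^25-1)*2^103 = (2^24-1)*2^104 + 2^103,
+ // that is, the largest finite float32 plus half an ulp; float64 is
+ // analogous with 2^53.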
+ maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
+ minfltval[TFLOAT32].SetString("-33554431p103")
+ maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
+ minfltval[TFLOAT64].SetString("-18014398509481983p970")
+
+ maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
+ minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
+ maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
+ minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
+
+ types.Types[TINTER] = types.New(TINTER) // empty interface
+
+ // simple aliases
+ simtype[TMAP] = TPTR
+ simtype[TCHAN] = TPTR
+ simtype[TFUNC] = TPTR
+ simtype[TUNSAFEPTR] = TPTR
+
+ slicePtrOffset = 0
+ sliceLenOffset = Rnd(slicePtrOffset+int64(Widthptr), int64(Widthptr))
+ sliceCapOffset = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
+ sizeofSlice = Rnd(sliceCapOffset+int64(Widthptr), int64(Widthptr))
+
+ // string is the same as slice without the cap
+ sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
+
+ dowidth(types.Types[TSTRING])
+ dowidth(types.UntypedString)
+}
+
+func makeErrorInterface() *types.Type {
+ field := types.NewField()
+ field.Type = types.Types[TSTRING]
+ f := functypefield(fakeRecvField(), nil, []*types.Field{field})
+
+ field = types.NewField()
+ field.Sym = lookup("Error")
+ field.Type = f
+
+ t := types.New(TINTER)
+ t.SetInterface([]*types.Field{field})
+ return t
+}
+
+func lexinit1() {
+ // error type
+ s := builtinpkg.Lookup("error")
+ types.Errortype = makeErrorInterface()
+ types.Errortype.Sym = s
+ types.Errortype.Orig = makeErrorInterface()
+ s.Def = asTypesNode(typenod(types.Errortype))
+ dowidth(types.Errortype)
+
+ // We create separate byte and rune types for better error messages
+ // rather than just creating type alias *types.Sym's for the uint8 and
+ // int32 types. Hence, (bytetype|runetype).Sym.isAlias() is false.
+ // TODO(gri) Should we get rid of this special case (at the cost
+ // of less informative error messages involving bytes and runes)?
+ // (Alternatively, we could introduce an OTALIAS node representing
+ // type aliases, albeit at the cost of having to deal with it everywhere).
+
+ // byte alias
+ s = builtinpkg.Lookup("byte")
+ types.Bytetype = types.New(TUINT8)
+ types.Bytetype.Sym = s
+ s.Def = asTypesNode(typenod(types.Bytetype))
+ asNode(s.Def).Name = new(Name)
+ dowidth(types.Bytetype)
+
+ // rune alias
+ s = builtinpkg.Lookup("rune")
+ types.Runetype = types.New(TINT32)
+ types.Runetype.Sym = s
+ s.Def = asTypesNode(typenod(types.Runetype))
+ asNode(s.Def).Name = new(Name)
+ dowidth(types.Runetype)
+
+ // backend-dependent builtin types (e.g. int).
+ for _, s := range &typedefs {
+ s1 := builtinpkg.Lookup(s.name)
+
+ sameas := s.sameas32
+ if Widthptr == 8 {
+ sameas = s.sameas64
+ }
+
+ simtype[s.etype] = sameas
+ minfltval[s.etype] = minfltval[sameas]
+ maxfltval[s.etype] = maxfltval[sameas]
+ minintval[s.etype] = minintval[sameas]
+ maxintval[s.etype] = maxintval[sameas]
+
+ t := types.New(s.etype)
+ t.Sym = s1
+ types.Types[s.etype] = t
+ s1.Def = asTypesNode(typenod(t))
+ asNode(s1.Def).Name = new(Name)
+ s1.Origpkg = builtinpkg
+
+ dowidth(t)
+ }
+}
+
+// finishUniverse makes the universe block visible within the current package.
+func finishUniverse() {
+ // Operationally, this is similar to a dot import of builtinpkg, except
+ // that we silently skip symbols that are already declared in the
+ // package block rather than emitting a redeclared symbol error.
+
+ for _, s := range builtinpkg.Syms {
+ if s.Def == nil {
+ continue
+ }
+ s1 := lookup(s.Name)
+ if s1.Def != nil {
+ continue
+ }
+
+ s1.Def = s.Def
+ s1.Block = s.Block
+ }
+
+ nodfp = newname(lookup(".fp"))
+ nodfp.Type = types.Types[TINT32]
+ nodfp.SetClass(PPARAM)
+ nodfp.Name.SetUsed(true)
+}
diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go
new file mode 100644
index 0000000..2233961
--- /dev/null
+++ b/src/cmd/compile/internal/gc/unsafe.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// evalunsafe evaluates a package unsafe operation and returns the result.
+func evalunsafe(n *Node) int64 {
+ switch n.Op {
+ case OALIGNOF, OSIZEOF:
+ n.Left = typecheck(n.Left, ctxExpr)
+ n.Left = defaultlit(n.Left, nil)
+ tr := n.Left.Type
+ if tr == nil {
+ return 0
+ }
+ dowidth(tr)
+ if n.Op == OALIGNOF {
+ return int64(tr.Align)
+ }
+ return tr.Width
+
+ case OOFFSETOF:
+ // must be a selector.
+ if n.Left.Op != OXDOT {
+ yyerror("invalid expression %v", n)
+ return 0
+ }
+
+ // Remember base of selector to find it back after dot insertion.
+ // Since n.Left.Left may be mutated by typechecking, typecheck it explicitly
+ // first so that base refers to the typechecked node.
+ n.Left.Left = typecheck(n.Left.Left, ctxExpr)
+ base := n.Left.Left
+
+ n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left.Type == nil {
+ return 0
+ }
+ switch n.Left.Op {
+ case ODOT, ODOTPTR:
+ break
+ case OCALLPART:
+ yyerror("invalid expression %v: argument is a method value", n)
+ return 0
+ default:
+ yyerror("invalid expression %v", n)
+ return 0
+ }
+
+ // Sum offsets for dots until we reach base.
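+ // For promoted fields, typechecking has inserted implicit ODOT nodes for the
+ // embedded fields, and their offsets are included in the sum as well.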
+ var v int64
+ for r := n.Left; r != base; r = r.Left {
+ switch r.Op {
+ case ODOTPTR:
+ // For Offsetof(s.f), s may itself be a pointer,
+ // but accessing f must not otherwise involve
+ // indirection via embedded pointer types.
+ if r.Left != base {
+ yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
+ return 0
+ }
+ fallthrough
+ case ODOT:
+ v += r.Xoffset
+ default:
+ Dump("unsafenmagic", n.Left)
+ Fatalf("impossible %#v node after dot insertion", r.Op)
+ }
+ }
+ return v
+ }
+
+ Fatalf("unexpected op %v", n.Op)
+ return 0
+}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
new file mode 100644
index 0000000..58be2f8
--- /dev/null
+++ b/src/cmd/compile/internal/gc/util.go
@@ -0,0 +1,103 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func (n *Node) Line() string {
+ return linestr(n.Pos)
+}
+
+var atExitFuncs []func()
+
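+// atExit registers f to be run when Exit is called.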
+func atExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
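+// Exit runs the registered atExit functions in reverse registration order and
+// then exits the process with the given status code.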
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
+
+var (
+ blockprofile string
+ cpuprofile string
+ memprofile string
+ memprofilerate int64
+ traceprofile string
+ traceHandler func(string)
+ mutexprofile string
+)
+
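+// startProfile enables CPU, memory, block, and mutex profiling and execution
+// tracing as requested by the corresponding flags, registering any required
+// cleanup with atExit.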
+func startProfile() {
+ if cpuprofile != "" {
+ f, err := os.Create(cpuprofile)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ Fatalf("%v", err)
+ }
+ atExit(pprof.StopCPUProfile)
+ }
+ if memprofile != "" {
+ if memprofilerate != 0 {
+ runtime.MemProfileRate = int(memprofilerate)
+ }
+ f, err := os.Create(memprofile)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ atExit(func() {
+ // Profile all outstanding allocations.
+ runtime.GC()
+ // compilebench parses the memory profile to extract memstats,
+ // which are only written in the legacy pprof format.
+ // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+ const writeLegacyFormat = 1
+ if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+ Fatalf("%v", err)
+ }
+ })
+ } else {
+ // Not doing memory profiling; disable it entirely.
+ runtime.MemProfileRate = 0
+ }
+ if blockprofile != "" {
+ f, err := os.Create(blockprofile)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ runtime.SetBlockProfileRate(1)
+ atExit(func() {
+ pprof.Lookup("block").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if mutexprofile != "" {
+ f, err := os.Create(mutexprofile)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ startMutexProfiling()
+ atExit(func() {
+ pprof.Lookup("mutex").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if traceprofile != "" && traceHandler != nil {
+ traceHandler(traceprofile)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
new file mode 100644
index 0000000..02a7269
--- /dev/null
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -0,0 +1,4125 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "encoding/binary"
+ "fmt"
+ "strings"
+)
+
+// The constant is known to the runtime.
+const tmpstringbufsize = 32
+const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
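+// Map elements wider than zeroValSize are read through the mapaccess*_fat
+// variants, which take a pointer to a shared zero value (see the OINDEXMAP and
+// OAS2MAPR cases in walkexpr).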
+
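+// walk rewrites the body of fn for the back end: it reports any remaining
+// declared-but-not-used variables, lowers the statements of fn.Nbody via
+// walkstmtlist, and finally inserts result zeroing (zeroResults) and the
+// copies between stack and heap for escaping parameters (heapmoves).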
+func walk(fn *Node) {
+ Curfn = fn
+
+ if Debug.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Nbody)
+ }
+
+ lno := lineno
+
+ // Final typecheck for any unused variables.
+ for i, ln := range fn.Func.Dcl {
+ if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
+ ln = typecheck(ln, ctxExpr|ctxAssign)
+ fn.Func.Dcl[i] = ln
+ }
+ }
+
+ // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+ for _, ln := range fn.Func.Dcl {
+ if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
+ ln.Name.Defn.Left.Name.SetUsed(true)
+ }
+ }
+
+ for _, ln := range fn.Func.Dcl {
+ if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
+ continue
+ }
+ if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
+ if defn.Left.Name.Used() {
+ continue
+ }
+ yyerrorl(defn.Left.Pos, "%v declared but not used", ln.Sym)
+ defn.Left.Name.SetUsed(true) // suppress repeats
+ } else {
+ yyerrorl(ln.Pos, "%v declared but not used", ln.Sym)
+ }
+ }
+
+ lineno = lno
+ if nerrors != 0 {
+ return
+ }
+ walkstmtlist(Curfn.Nbody.Slice())
+ if Debug.W != 0 {
+ s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Nbody)
+ }
+
+ zeroResults()
+ heapmoves()
+ if Debug.W != 0 && Curfn.Func.Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Func.Enter)
+ }
+}
+
+func walkstmtlist(s []*Node) {
+ for i := range s {
+ s[i] = walkstmt(s[i])
+ }
+}
+
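+// paramoutheap reports whether fn has an output parameter whose address has
+// been taken or that is the stack copy of a parameter moved to the heap.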
+func paramoutheap(fn *Node) bool {
+ for _, ln := range fn.Func.Dcl {
+ switch ln.Class() {
+ case PPARAMOUT:
+ if ln.isParamStackCopy() || ln.Name.Addrtaken() {
+ return true
+ }
+
+ case PAUTO:
+ // stop early - parameters are over
+ return false
+ }
+ }
+
+ return false
+}
+
+// The result of walkstmt MUST be assigned back to n, e.g.
+// n.Left = walkstmt(n.Left)
+func walkstmt(n *Node) *Node {
+ if n == nil {
+ return n
+ }
+
+ setlineno(n)
+
+ walkstmtlist(n.Ninit.Slice())
+
+ switch n.Op {
+ default:
+ if n.Op == ONAME {
+ yyerror("%v is not a top level statement", n.Sym)
+ } else {
+ yyerror("%v is not a top level statement", n.Op)
+ }
+ Dump("nottop", n)
+
+ case OAS,
+ OASOP,
+ OAS2,
+ OAS2DOTTYPE,
+ OAS2RECV,
+ OAS2FUNC,
+ OAS2MAPR,
+ OCLOSE,
+ OCOPY,
+ OCALLMETH,
+ OCALLINTER,
+ OCALL,
+ OCALLFUNC,
+ ODELETE,
+ OSEND,
+ OPRINT,
+ OPRINTN,
+ OPANIC,
+ OEMPTY,
+ ORECOVER,
+ OGETG:
+ if n.Typecheck() == 0 {
+ Fatalf("missing typecheck: %+v", n)
+ }
+ wascopy := n.Op == OCOPY
+ init := n.Ninit
+ n.Ninit.Set(nil)
+ n = walkexpr(n, &init)
+ n = addinit(n, init.Slice())
+ if wascopy && n.Op == OCONVNOP {
+ n.Op = OEMPTY // don't leave plain values as statements.
+ }
+
+ // special case for a receive where we throw away
+ // the value received.
+ case ORECV:
+ if n.Typecheck() == 0 {
+ Fatalf("missing typecheck: %+v", n)
+ }
+ init := n.Ninit
+ n.Ninit.Set(nil)
+
+ n.Left = walkexpr(n.Left, &init)
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
+ n = walkexpr(n, &init)
+
+ n = addinit(n, init.Slice())
+
+ case OBREAK,
+ OCONTINUE,
+ OFALL,
+ OGOTO,
+ OLABEL,
+ ODCLCONST,
+ ODCLTYPE,
+ OCHECKNIL,
+ OVARDEF,
+ OVARKILL,
+ OVARLIVE:
+ break
+
+ case ODCL:
+ v := n.Left
+ if v.Class() == PAUTOHEAP {
+ if compiling_runtime {
+ yyerror("%v escapes to heap, not allowed in runtime", v)
+ }
+ if prealloc[v] == nil {
+ prealloc[v] = callnew(v.Type)
+ }
+ nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
+ nn.SetColas(true)
+ nn = typecheck(nn, ctxStmt)
+ return walkstmt(nn)
+ }
+
+ case OBLOCK:
+ walkstmtlist(n.List.Slice())
+
+ case OCASE:
+ yyerror("case statement out of place")
+
+ case ODEFER:
+ Curfn.Func.SetHasDefer(true)
+ Curfn.Func.numDefers++
+ if Curfn.Func.numDefers > maxOpenDefers {
+ // Don't allow open-coded defers if there are more than
+ // 8 defers in the function, since we use a single
+ // byte to record active defers.
+ Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ }
+ if n.Esc != EscNever {
+ // If n.Esc is not EscNever, then this defer occurs in a loop,
+ // so open-coded defers cannot be used in this function.
+ Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ }
+ fallthrough
+ case OGO:
+ switch n.Left.Op {
+ case OPRINT, OPRINTN:
+ n.Left = wrapCall(n.Left, &n.Ninit)
+
+ case ODELETE:
+ if mapfast(n.Left.List.First().Type) == mapslow {
+ n.Left = wrapCall(n.Left, &n.Ninit)
+ } else {
+ n.Left = walkexpr(n.Left, &n.Ninit)
+ }
+
+ case OCOPY:
+ n.Left = copyany(n.Left, &n.Ninit, true)
+
+ case OCALLFUNC, OCALLMETH, OCALLINTER:
+ if n.Left.Nbody.Len() > 0 {
+ n.Left = wrapCall(n.Left, &n.Ninit)
+ } else {
+ n.Left = walkexpr(n.Left, &n.Ninit)
+ }
+
+ default:
+ n.Left = walkexpr(n.Left, &n.Ninit)
+ }
+
+ case OFOR, OFORUNTIL:
+ if n.Left != nil {
+ walkstmtlist(n.Left.Ninit.Slice())
+ init := n.Left.Ninit
+ n.Left.Ninit.Set(nil)
+ n.Left = walkexpr(n.Left, &init)
+ n.Left = addinit(n.Left, init.Slice())
+ }
+
+ n.Right = walkstmt(n.Right)
+ if n.Op == OFORUNTIL {
+ walkstmtlist(n.List.Slice())
+ }
+ walkstmtlist(n.Nbody.Slice())
+
+ case OIF:
+ n.Left = walkexpr(n.Left, &n.Ninit)
+ walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Rlist.Slice())
+
+ case ORETURN:
+ Curfn.Func.numReturns++
+ if n.List.Len() == 0 {
+ break
+ }
+ if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) || Curfn.Func.HasDefer() {
+ // assign to the function out parameters,
+ // so that reorder3 can fix up conflicts
+ var rl []*Node
+
+ for _, ln := range Curfn.Func.Dcl {
+ cl := ln.Class()
+ if cl == PAUTO || cl == PAUTOHEAP {
+ break
+ }
+ if cl == PPARAMOUT {
+ if ln.isParamStackCopy() {
+ ln = walkexpr(typecheck(nod(ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
+ }
+ rl = append(rl, ln)
+ }
+ }
+
+ if got, want := n.List.Len(), len(rl); got != want {
+ // order should have rewritten multi-value function calls
+ // with explicit OAS2FUNC nodes.
+ Fatalf("expected %v return arguments, have %v", want, got)
+ }
+
+ // move function calls out, to make reorder3's job easier.
+ walkexprlistsafe(n.List.Slice(), &n.Ninit)
+
+ ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
+ n.List.Set(reorder3(ll))
+ break
+ }
+ walkexprlist(n.List.Slice(), &n.Ninit)
+
+ // For each return parameter (lhs), assign the corresponding result (rhs).
+ lhs := Curfn.Type.Results()
+ rhs := n.List.Slice()
+ res := make([]*Node, lhs.NumFields())
+ for i, nl := range lhs.FieldSlice() {
+ nname := asNode(nl.Nname)
+ if nname.isParamHeapCopy() {
+ nname = nname.Name.Param.Stackcopy
+ }
+ a := nod(OAS, nname, rhs[i])
+ res[i] = convas(a, &n.Ninit)
+ }
+ n.List.Set(res)
+
+ case ORETJMP:
+ break
+
+ case OINLMARK:
+ break
+
+ case OSELECT:
+ walkselect(n)
+
+ case OSWITCH:
+ walkswitch(n)
+
+ case ORANGE:
+ n = walkrange(n)
+ }
+
+ if n.Op == ONAME {
+ Fatalf("walkstmt ended up with name: %+v", n)
+ }
+ return n
+}
+
+// walk the whole tree of the body of an
+// expression or simple statement.
+// the types of expressions are calculated.
+// compile-time constants are evaluated.
+// complex side effects like statements are appended to init
+func walkexprlist(s []*Node, init *Nodes) {
+ for i := range s {
+ s[i] = walkexpr(s[i], init)
+ }
+}
+
+func walkexprlistsafe(s []*Node, init *Nodes) {
+ for i, n := range s {
+ s[i] = safeexpr(n, init)
+ s[i] = walkexpr(s[i], init)
+ }
+}
+
+func walkexprlistcheap(s []*Node, init *Nodes) {
+ for i, n := range s {
+ s[i] = cheapexpr(n, init)
+ s[i] = walkexpr(s[i], init)
+ }
+}
+
+// convFuncName builds the runtime function name for interface conversion.
+// It also reports whether the function expects the data by address.
+// Not all names are possible. For example, we never generate convE2E or convE2I.
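+// Converting a uint32 to an interface, for instance, uses convT32 and passes
+// the value directly, while converting a larger pointer-free struct to a
+// non-empty interface uses convT2Inoptr and passes the value by address.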
+func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
+ tkind := to.Tie()
+ switch from.Tie() {
+ case 'I':
+ if tkind == 'I' {
+ return "convI2I", false
+ }
+ case 'T':
+ switch {
+ case from.Size() == 2 && from.Align == 2:
+ return "convT16", false
+ case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
+ return "convT32", false
+ case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
+ return "convT64", false
+ }
+ if sc := from.SoleComponent(); sc != nil {
+ switch {
+ case sc.IsString():
+ return "convTstring", false
+ case sc.IsSlice():
+ return "convTslice", false
+ }
+ }
+
+ switch tkind {
+ case 'E':
+ if !from.HasPointers() {
+ return "convT2Enoptr", true
+ }
+ return "convT2E", true
+ case 'I':
+ if !from.HasPointers() {
+ return "convT2Inoptr", true
+ }
+ return "convT2I", true
+ }
+ }
+ Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
+ panic("unreachable")
+}
+
+// The result of walkexpr MUST be assigned back to n, e.g.
+// n.Left = walkexpr(n.Left, init)
+func walkexpr(n *Node, init *Nodes) *Node {
+ if n == nil {
+ return n
+ }
+
+ // Eagerly checkwidth all expressions for the back end.
+ if n.Type != nil && !n.Type.WidthCalculated() {
+ switch n.Type.Etype {
+ case TBLANK, TNIL, TIDEAL:
+ default:
+ checkwidth(n.Type)
+ }
+ }
+
+ if init == &n.Ninit {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ Fatalf("walkexpr init == &n->ninit")
+ }
+
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
+ }
+
+ lno := setlineno(n)
+
+ if Debug.w > 1 {
+ Dump("before walk expr", n)
+ }
+
+ if n.Typecheck() != 1 {
+ Fatalf("missed typecheck: %+v", n)
+ }
+
+ if n.Type.IsUntyped() {
+ Fatalf("expression has untyped type: %+v", n)
+ }
+
+ if n.Op == ONAME && n.Class() == PAUTOHEAP {
+ nn := nod(ODEREF, n.Name.Param.Heapaddr, nil)
+ nn = typecheck(nn, ctxExpr)
+ nn = walkexpr(nn, init)
+ nn.Left.MarkNonNil()
+ return nn
+ }
+
+opswitch:
+ switch n.Op {
+ default:
+ Dump("walk", n)
+ Fatalf("walkexpr: switch 1 unknown op %+S", n)
+
+ case ONONAME, OEMPTY, OGETG, ONEWOBJ:
+
+ case OTYPE, ONAME, OLITERAL:
+ // TODO(mdempsky): Just return n; see discussion on CL 38655.
+ // Perhaps refactor to use Node.mayBeShared for these instead.
+ // If these return early, make sure to still call
+ // stringsym for constant strings.
+
+ case ONOT, ONEG, OPLUS, OBITNOT, OREAL, OIMAG, ODOTMETH, ODOTINTER,
+ ODEREF, OSPTR, OITAB, OIDATA, OADDR:
+ n.Left = walkexpr(n.Left, init)
+
+ case OEFACE, OAND, OANDNOT, OSUB, OMUL, OADD, OOR, OXOR, OLSH, ORSH:
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+
+ case ODOT, ODOTPTR:
+ usefield(n)
+ n.Left = walkexpr(n.Left, init)
+
+ case ODOTTYPE, ODOTTYPE2:
+ n.Left = walkexpr(n.Left, init)
+ // Set up interface type addresses for back end.
+ n.Right = typename(n.Type)
+ if n.Op == ODOTTYPE {
+ n.Right.Right = typename(n.Left.Type)
+ }
+ if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
+ n.List.Set1(itabname(n.Type, n.Left.Type))
+ }
+
+ case OLEN, OCAP:
+ if isRuneCount(n) {
+ // Replace len([]rune(string)) with runtime.countrunes(string).
+ n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
+ break
+ }
+
+ n.Left = walkexpr(n.Left, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t := n.Left.Type
+
+ if t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ safeexpr(n.Left, init)
+ setintconst(n, t.NumElem())
+ n.SetTypecheck(1)
+ }
+
+ case OCOMPLEX:
+ // Use results from call expression as arguments for complex.
+ if n.Left == nil && n.Right == nil {
+ n.Left = n.List.First()
+ n.Right = n.List.Second()
+ }
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ n = walkcompare(n, init)
+
+ case OANDAND, OOROR:
+ n.Left = walkexpr(n.Left, init)
+
+ // cannot put side effects from n.Right on init,
+ // because they cannot run before n.Left is checked.
+ // save elsewhere and store on the eventual n.Right.
+ var ll Nodes
+
+ n.Right = walkexpr(n.Right, &ll)
+ n.Right = addinit(n.Right, ll.Slice())
+
+ case OPRINT, OPRINTN:
+ n = walkprint(n, init)
+
+ case OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left)
+
+ case ORECOVER:
+ n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
+
+ case OCLOSUREVAR, OCFUNC:
+
+ case OCALLINTER, OCALLFUNC, OCALLMETH:
+ if n.Op == OCALLINTER || n.Op == OCALLMETH {
+ // We expect both interface call reflect.Type.Method and concrete
+ // call reflect.(*rtype).Method.
+ usemethod(n)
+ }
+ if n.Op == OCALLINTER {
+ markUsedIfaceMethod(n)
+ }
+
+ if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE {
+ // Transform direct call of a closure to call of a normal function.
+ // transformclosure already did all preparation work.
+
+ // Prepend captured variables to argument list.
+ n.List.Prepend(n.Left.Func.Enter.Slice()...)
+
+ n.Left.Func.Enter.Set(nil)
+
+ // Replace OCLOSURE with ONAME/PFUNC.
+ n.Left = n.Left.Func.Closure.Func.Nname
+
+ // Update type of OCALLFUNC node.
+ // Output arguments have not changed, but their offsets could have.
+ if n.Left.Type.NumResults() == 1 {
+ n.Type = n.Left.Type.Results().Field(0).Type
+ } else {
+ n.Type = n.Left.Type.Results()
+ }
+ }
+
+ walkCall(n, init)
+
+ case OAS, OASOP:
+ init.AppendNodes(&n.Ninit)
+
+ // Recognize m[k] = append(m[k], ...) so we can reuse
+ // the mapassign call.
+ mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
+ if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
+ Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
+ }
+
+ n.Left = walkexpr(n.Left, init)
+ n.Left = safeexpr(n.Left, init)
+
+ if mapAppend {
+ n.Right.List.SetFirst(n.Left)
+ }
+
+ if n.Op == OASOP {
+ // Rewrite x op= y into x = x op y.
+ n.Right = nod(n.SubOp(), n.Left, n.Right)
+ n.Right = typecheck(n.Right, ctxExpr)
+
+ n.Op = OAS
+ n.ResetAux()
+ }
+
+ if oaslit(n, init) {
+ break
+ }
+
+ if n.Right == nil {
+ // TODO(austin): Check all "implicit zeroing"
+ break
+ }
+
+ if !instrumenting && isZero(n.Right) {
+ break
+ }
+
+ switch n.Right.Op {
+ default:
+ n.Right = walkexpr(n.Right, init)
+
+ case ORECV:
+ // x = <-c; n.Left is x, n.Right.Left is c.
+ // order.stmt made sure x is addressable.
+ n.Right.Left = walkexpr(n.Right.Left, init)
+
+ n1 := nod(OADDR, n.Left, nil)
+ r := n.Right.Left // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
+ n = walkexpr(n, init)
+ break opswitch
+
+ case OAPPEND:
+ // x = append(...)
+ r := n.Right
+ if r.Type.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
+ }
+ switch {
+ case isAppendOfMake(r):
+ // x = append(y, make([]T, y)...)
+ r = extendslice(r, init)
+ case r.IsDDD():
+ r = appendslice(r, init) // also works for append(slice, string).
+ default:
+ r = walkappend(r, init, n)
+ }
+ n.Right = r
+ if r.Op == OAPPEND {
+ // Left in place for back end.
+ // Do not add a new write barrier.
+ // Set up address of type for back end.
+ r.Left = typename(r.Type.Elem())
+ break opswitch
+ }
+ // Otherwise, lowered for race detector.
+ // Treat as ordinary assignment.
+ }
+
+ if n.Left != nil && n.Right != nil {
+ n = convas(n, init)
+ }
+
+ case OAS2:
+ init.AppendNodes(&n.Ninit)
+ walkexprlistsafe(n.List.Slice(), init)
+ walkexprlistsafe(n.Rlist.Slice(), init)
+ ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
+ ll = reorder3(ll)
+ n = liststmt(ll)
+
+ // a,b,... = fn()
+ case OAS2FUNC:
+ init.AppendNodes(&n.Ninit)
+
+ r := n.Right
+ walkexprlistsafe(n.List.Slice(), init)
+ r = walkexpr(r, init)
+
+ if isIntrinsicCall(r) {
+ n.Right = r
+ break
+ }
+ init.Append(r)
+
+ ll := ascompatet(n.List, r.Type)
+ n = liststmt(ll)
+
+ // x, y = <-c
+ // order.stmt made sure x is addressable or blank.
+ case OAS2RECV:
+ init.AppendNodes(&n.Ninit)
+
+ r := n.Right
+ walkexprlistsafe(n.List.Slice(), init)
+ r.Left = walkexpr(r.Left, init)
+ var n1 *Node
+ if n.List.First().isBlank() {
+ n1 = nodnil()
+ } else {
+ n1 = nod(OADDR, n.List.First(), nil)
+ }
+ fn := chanfn("chanrecv2", 2, r.Left.Type)
+ ok := n.List.Second()
+ call := mkcall1(fn, types.Types[TBOOL], init, r.Left, n1)
+ n = nod(OAS, ok, call)
+ n = typecheck(n, ctxStmt)
+
+ // a,b = m[i]
+ case OAS2MAPR:
+ init.AppendNodes(&n.Ninit)
+
+ r := n.Right
+ walkexprlistsafe(n.List.Slice(), init)
+ r.Left = walkexpr(r.Left, init)
+ r.Right = walkexpr(r.Right, init)
+ t := r.Left.Type
+
+ fast := mapfast(t)
+ var key *Node
+ if fast != mapslow {
+ // fast versions take key by value
+ key = r.Right
+ } else {
+ // standard version takes key by reference
+ // order.expr made sure key is addressable.
+ key = nod(OADDR, r.Right, nil)
+ }
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a := n.List.First()
+
+ if w := t.Elem().Width; w <= zeroValSize {
+ fn := mapfn(mapaccess2[fast], t)
+ r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
+ } else {
+ fn := mapfn("mapaccess2_fat", t)
+ z := zeroaddr(w)
+ r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
+ }
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+ // the boolean result of i.(T) is now untyped so we make it the
+ // same type as the variable on the lhs.
+ if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
+ r.Type.Field(1).Type = ok.Type
+ }
+ n.Right = r
+ n.Op = OAS2FUNC
+
+ // don't generate a = *var if a is _
+ if !a.isBlank() {
+ var_ := temp(types.NewPtr(t.Elem()))
+ var_.SetTypecheck(1)
+ var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+ n.List.SetFirst(var_)
+ n = walkexpr(n, init)
+ init.Append(n)
+ n = nod(OAS, a, nod(ODEREF, var_, nil))
+ }
+
+ n = typecheck(n, ctxStmt)
+ n = walkexpr(n, init)
+
+ case ODELETE:
+ init.AppendNodes(&n.Ninit)
+ map_ := n.List.First()
+ key := n.List.Second()
+ map_ = walkexpr(map_, init)
+ key = walkexpr(key, init)
+
+ t := map_.Type
+ fast := mapfast(t)
+ if fast == mapslow {
+ // order.stmt made sure key is addressable.
+ key = nod(OADDR, key, nil)
+ }
+ n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
+
+ case OAS2DOTTYPE:
+ walkexprlistsafe(n.List.Slice(), init)
+ n.Right = walkexpr(n.Right, init)
+
+ case OCONVIFACE:
+ n.Left = walkexpr(n.Left, init)
+
+ fromType := n.Left.Type
+ toType := n.Type
+
+ if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.Func.lsym)
+ }
+
+ // typeword generates the type word of the interface value.
+ typeword := func() *Node {
+ if toType.IsEmptyInterface() {
+ return typename(fromType)
+ }
+ return itabname(fromType, toType)
+ }
+
+ // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
+ if isdirectiface(fromType) {
+ l := nod(OEFACE, typeword(), n.Left)
+ l.Type = toType
+ l.SetTypecheck(n.Typecheck())
+ n = l
+ break
+ }
+
+ if staticuint64s == nil {
+ staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
+ staticuint64s.SetClass(PEXTERN)
+ // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+ // individual bytes.
+ staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
+ zerobase = newname(Runtimepkg.Lookup("zerobase"))
+ zerobase.SetClass(PEXTERN)
+ zerobase.Type = types.Types[TUINTPTR]
+ }
+
+ // Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
+ // by using an existing addressable value identical to n.Left
+ // or creating one on the stack.
+ var value *Node
+ switch {
+ case fromType.Size() == 0:
+ // n.Left is zero-sized. Use zerobase.
+ cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
+ value = zerobase
+ case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
+ // n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
+ // and staticuint64s[n.Left * 8 + 7] on big-endian.
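+ // For example, the byte value 3 yields a pointer into staticuint64s at
+ // offset 3*8 (3*8+7 on big-endian), the byte of the uint64 cell holding 3.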
+ n.Left = cheapexpr(n.Left, init)
+ // byteindex widens n.Left so that the multiplication doesn't overflow.
+ index := nod(OLSH, byteindex(n.Left), nodintconst(3))
+ if thearch.LinkArch.ByteOrder == binary.BigEndian {
+ index = nod(OADD, index, nodintconst(7))
+ }
+ value = nod(OINDEX, staticuint64s, index)
+ value.SetBounded(true)
+ case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
+ // n.Left is a readonly global; use it directly.
+ value = n.Left
+ case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
+ // n.Left does not escape. Use a stack temporary initialized to n.Left.
+ value = temp(fromType)
+ init.Append(typecheck(nod(OAS, value, n.Left), ctxStmt))
+ }
+
+ if value != nil {
+ // Value is identical to n.Left.
+ // Construct the interface directly: {type/itab, &value}.
+ l := nod(OEFACE, typeword(), typecheck(nod(OADDR, value, nil), ctxExpr))
+ l.Type = toType
+ l.SetTypecheck(n.Typecheck())
+ n = l
+ break
+ }
+
+ // Implement interface to empty interface conversion.
+ // tmp = i.itab
+ // if tmp != nil {
+ // tmp = tmp.type
+ // }
+ // e = iface{tmp, i.data}
+ if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
+ // Evaluate the input interface.
+ c := temp(fromType)
+ init.Append(nod(OAS, c, n.Left))
+
+ // Get the itab out of the interface.
+ tmp := temp(types.NewPtr(types.Types[TUINT8]))
+ init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), ctxExpr)))
+
+ // Get the type out of the itab.
+ nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), ctxExpr), nil)
+ nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
+ init.Append(nif)
+
+ // Build the result.
+ e := nod(OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[TUINT8])))
+ e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
+ e.SetTypecheck(1)
+ n = e
+ break
+ }
+
+ fnname, needsaddr := convFuncName(fromType, toType)
+
+ if !needsaddr && !fromType.IsInterface() {
+ // Use a specialized conversion routine that only returns a data pointer.
+ // ptr = convT2X(val)
+ // e = iface{typ/tab, ptr}
+ fn := syslook(fnname)
+ dowidth(fromType)
+ fn = substArgTypes(fn, fromType)
+ dowidth(fn.Type)
+ call := nod(OCALL, fn, nil)
+ call.List.Set1(n.Left)
+ call = typecheck(call, ctxExpr)
+ call = walkexpr(call, init)
+ call = safeexpr(call, init)
+ e := nod(OEFACE, typeword(), call)
+ e.Type = toType
+ e.SetTypecheck(1)
+ n = e
+ break
+ }
+
+ var tab *Node
+ if fromType.IsInterface() {
+ // convI2I
+ tab = typename(toType)
+ } else {
+ // convT2x
+ tab = typeword()
+ }
+
+ v := n.Left
+ if needsaddr {
+ // Types of large or unknown size are passed by reference.
+ // Orderexpr arranged for n.Left to be a temporary for all
+ // the conversions it could see. Comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to order.stmt, so we
+ // have to fall back on allocating a temp here.
+ if !islvalue(v) {
+ v = copyexpr(v, v.Type, init)
+ }
+ v = nod(OADDR, v, nil)
+ }
+
+ dowidth(fromType)
+ fn := syslook(fnname)
+ fn = substArgTypes(fn, fromType, toType)
+ dowidth(fn.Type)
+ n = nod(OCALL, fn, nil)
+ n.List.Set2(tab, v)
+ n = typecheck(n, ctxExpr)
+ n = walkexpr(n, init)
+
+ case OCONV, OCONVNOP:
+ n.Left = walkexpr(n.Left, init)
+ if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
+ if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
+ n = walkCheckPtrAlignment(n, init, nil)
+ break
+ }
+ if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
+ n = walkCheckPtrArithmetic(n, init)
+ break
+ }
+ }
+ param, result := rtconvfn(n.Left.Type, n.Type)
+ if param == Txxx {
+ break
+ }
+ fn := basicnames[param] + "to" + basicnames[result]
+ n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
+
+ case ODIV, OMOD:
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+
+ // rewrite complex div into function call.
+ et := n.Left.Type.Etype
+
+ if isComplex[et] && n.Op == ODIV {
+ t := n.Type
+ n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
+ n = conv(n, t)
+ break
+ }
+
+ // Nothing to do for float divisions.
+ if isFloat[et] {
+ break
+ }
+
+ // rewrite 64-bit div and mod on 32-bit architectures.
+ // TODO: Remove this code once we can introduce
+ // runtime calls late in SSA processing.
+ if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
+ if n.Right.Op == OLITERAL {
+ // Leave div/mod by constant powers of 2 or small 16-bit constants.
+ // The SSA backend will handle those.
+ switch et {
+ case TINT64:
+ c := n.Right.Int64Val()
+ if c < 0 {
+ c = -c
+ }
+ if c != 0 && c&(c-1) == 0 {
+ break opswitch
+ }
+ case TUINT64:
+ c := uint64(n.Right.Int64Val())
+ if c < 1<<16 {
+ break opswitch
+ }
+ if c != 0 && c&(c-1) == 0 {
+ break opswitch
+ }
+ }
+ }
+ var fn string
+ if et == TINT64 {
+ fn = "int64"
+ } else {
+ fn = "uint64"
+ }
+ if n.Op == ODIV {
+ fn += "div"
+ } else {
+ fn += "mod"
+ }
+ n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
+ }
+
+ case OINDEX:
+ n.Left = walkexpr(n.Left, init)
+
+ // save the original node for bounds checking elision.
+ // If it was an ODIV/OMOD, walk might rewrite it.
+ r := n.Right
+
+ n.Right = walkexpr(n.Right, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded() {
+ break
+ }
+ t := n.Left.Type
+ if t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ n.SetBounded(bounded(r, t.NumElem()))
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if smallintconst(n.Right) && !n.Bounded() {
+ yyerror("index out of bounds")
+ }
+ } else if Isconst(n.Left, CTSTR) {
+ n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
+ if Debug.m != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if smallintconst(n.Right) && !n.Bounded() {
+ yyerror("index out of bounds")
+ }
+ }
+
+ if Isconst(n.Right, CTINT) {
+ if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
+ yyerror("index out of bounds")
+ }
+ }
+
+ case OINDEXMAP:
+ // Replace m[k] with *map{access1,assign}(maptype, m, &k)
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+ map_ := n.Left
+ key := n.Right
+ t := map_.Type
+ if n.IndexMapLValue() {
+ // This m[k] expression is on the left-hand side of an assignment.
+ fast := mapfast(t)
+ if fast == mapslow {
+ // standard version takes key by reference.
+ // order.expr made sure key is addressable.
+ key = nod(OADDR, key, nil)
+ }
+ n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
+ } else {
+ // m[k] is not the target of an assignment.
+ fast := mapfast(t)
+ if fast == mapslow {
+ // standard version takes key by reference.
+ // order.expr made sure key is addressable.
+ key = nod(OADDR, key, nil)
+ }
+
+ if w := t.Elem().Width; w <= zeroValSize {
+ n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
+ } else {
+ z := zeroaddr(w)
+ n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
+ }
+ }
+ n.Type = types.NewPtr(t.Elem())
+ n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+ n = nod(ODEREF, n, nil)
+ n.Type = t.Elem()
+ n.SetTypecheck(1)
+
+ case ORECV:
+ Fatalf("walkexpr ORECV") // should see inside OAS only
+
+ case OSLICEHEADER:
+ n.Left = walkexpr(n.Left, init)
+ n.List.SetFirst(walkexpr(n.List.First(), init))
+ n.List.SetSecond(walkexpr(n.List.Second(), init))
+
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
+ if checkSlice {
+ n.Left.Left = walkexpr(n.Left.Left, init)
+ } else {
+ n.Left = walkexpr(n.Left, init)
+ }
+ low, high, max := n.SliceBounds()
+ low = walkexpr(low, init)
+ if low != nil && isZero(low) {
+ // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+ low = nil
+ }
+ high = walkexpr(high, init)
+ max = walkexpr(max, init)
+ n.SetSliceBounds(low, high, max)
+ if checkSlice {
+ n.Left = walkCheckPtrAlignment(n.Left, init, max)
+ }
+ if n.Op.IsSlice3() {
+ if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
+ // Reduce x[i:j:cap(x)] to x[i:j].
+ if n.Op == OSLICE3 {
+ n.Op = OSLICE
+ } else {
+ n.Op = OSLICEARR
+ }
+ n = reduceSlice(n)
+ }
+ } else {
+ n = reduceSlice(n)
+ }
+
+ case ONEW:
+ if n.Type.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ }
+ if n.Esc == EscNone {
+ if n.Type.Elem().Width >= maxImplicitStackVarSize {
+ Fatalf("large ONEW with EscNone: %v", n)
+ }
+ r := temp(n.Type.Elem())
+ r = nod(OAS, r, nil) // zero temp
+ r = typecheck(r, ctxStmt)
+ init.Append(r)
+ r = nod(OADDR, r.Left, nil)
+ r = typecheck(r, ctxExpr)
+ n = r
+ } else {
+ n = callnew(n.Type.Elem())
+ }
+
+ case OADDSTR:
+ n = addstr(n, init)
+
+ case OAPPEND:
+ // order should make sure we only see OAS(node, OAPPEND), which we handle above.
+ Fatalf("append outside assignment")
+
+ case OCOPY:
+ n = copyany(n, init, instrumenting && !compiling_runtime)
+
+ // cannot use chanfn - closechan takes any, not chan any
+ case OCLOSE:
+ fn := syslook("closechan")
+
+ fn = substArgTypes(fn, n.Left.Type)
+ n = mkcall1(fn, nil, init, n.Left)
+
+ case OMAKECHAN:
+ // When size fits into int, use makechan instead of
+ // makechan64, which is faster and shorter on 32 bit platforms.
+ size := n.Left
+ fnname := "makechan64"
+ argtype := types.Types[TINT64]
+
+ // Type checking guarantees that TIDEAL size is positive and fits in an int.
+ // The case of size overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makechan during runtime.
+ if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ fnname = "makechan"
+ argtype = types.Types[TINT]
+ }
+
+ n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
+
+ case OMAKEMAP:
+ t := n.Type
+ hmapType := hmap(t)
+ hint := n.Left
+
+ // var h *hmap
+ var h *Node
+ if n.Esc == EscNone {
+ // Allocate hmap on stack.
+
+ // var hv hmap
+ hv := temp(hmapType)
+ zero := nod(OAS, hv, nil)
+ zero = typecheck(zero, ctxStmt)
+ init.Append(zero)
+ // h = &hv
+ h = nod(OADDR, hv, nil)
+
+ // Allocate one bucket pointed to by hmap.buckets on stack if hint
+ // is not larger than BUCKETSIZE. In case hint is larger than
+ // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
+ // Maximum key and elem size is 128 bytes, larger objects
+ // are stored with an indirection. So max bucket size is 2048+eps.
+ if !Isconst(hint, CTINT) ||
+ hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+
+ // In case hint is larger than BUCKETSIZE runtime.makemap
+ // will allocate the buckets on the heap, see #20184
+ //
+ // if hint <= BUCKETSIZE {
+ // var bv bmap
+ // b = &bv
+ // h.buckets = b
+ // }
+
+ nif := nod(OIF, nod(OLE, hint, nodintconst(BUCKETSIZE)), nil)
+ nif.SetLikely(true)
+
+ // var bv bmap
+ bv := temp(bmap(t))
+ zero = nod(OAS, bv, nil)
+ nif.Nbody.Append(zero)
+
+ // b = &bv
+ b := nod(OADDR, bv, nil)
+
+ // h.buckets = b
+ bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
+ na := nod(OAS, nodSym(ODOT, h, bsym), b)
+ nif.Nbody.Append(na)
+
+ nif = typecheck(nif, ctxStmt)
+ nif = walkstmt(nif)
+ init.Append(nif)
+ }
+ }
+
+ if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
+ // Handling make(map[any]any) and
+ // make(map[any]any, hint) where hint <= BUCKETSIZE
+ // specially allows for faster map initialization and
+ // improves binary size by using calls with fewer arguments.
+ // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
+ // and no buckets will be allocated by makemap. Therefore,
+ // no buckets need to be allocated in this code path.
+ if n.Esc == EscNone {
+ // Only need to initialize h.hash0 since
+ // hmap h has been allocated on the stack already.
+ // h.hash0 = fastrand()
+ rand := mkcall("fastrand", types.Types[TUINT32], init)
+ hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
+ a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
+ a = typecheck(a, ctxStmt)
+ a = walkexpr(a, init)
+ init.Append(a)
+ n = convnop(h, t)
+ } else {
+ // Call runtime.makehmap to allocate an
+ // hmap on the heap and initialize hmap's hash0 field.
+ fn := syslook("makemap_small")
+ fn = substArgTypes(fn, t.Key(), t.Elem())
+ n = mkcall1(fn, n.Type, init)
+ }
+ } else {
+ if n.Esc != EscNone {
+ h = nodnil()
+ }
+ // Map initialization with a variable or large hint is
+ // more complicated. We therefore generate a call to
+ // runtime.makemap to initialize hmap and allocate the
+ // map buckets.
+
+ // When hint fits into int, use makemap instead of
+ // makemap64, which is faster and shorter on 32 bit platforms.
+ fnname := "makemap64"
+ argtype := types.Types[TINT64]
+
+ // Type checking guarantees that TIDEAL hint is positive and fits in an int.
+ // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+ // The case of hint overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makemap during runtime.
+ if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
+ fnname = "makemap"
+ argtype = types.Types[TINT]
+ }
+
+ fn := syslook(fnname)
+ fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
+ }
+
+ case OMAKESLICE:
+ l := n.Left
+ r := n.Right
+ if r == nil {
+ r = safeexpr(l, init)
+ l = r
+ }
+ t := n.Type
+ if t.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+ if n.Esc == EscNone {
+ if why := heapAllocReason(n); why != "" {
+ Fatalf("%v has EscNone, but %v", n, why)
+ }
+ // var arr [r]T
+ // n = arr[:l]
+ i := indexconst(r)
+ if i < 0 {
+ Fatalf("walkexpr: invalid index %v", r)
+ }
+
+ // cap is constrained to [0,2^31) or [0,2^63) depending on whether
+ // we're on a 32-bit or 64-bit system. So it's safe to do:
+ //
+ // if uint64(len) > cap {
+ // if len < 0 { panicmakeslicelen() }
+ // panicmakeslicecap()
+ // }
+ nif := nod(OIF, nod(OGT, conv(l, types.Types[TUINT64]), nodintconst(i)), nil)
+ niflen := nod(OIF, nod(OLT, l, nodintconst(0)), nil)
+ niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ nif = typecheck(nif, ctxStmt)
+ init.Append(nif)
+
+ t = types.NewArray(t.Elem(), i) // [r]T
+ var_ := temp(t)
+ a := nod(OAS, var_, nil) // zero temp
+ a = typecheck(a, ctxStmt)
+ init.Append(a)
+ r := nod(OSLICE, var_, nil) // arr[:l]
+ r.SetSliceBounds(nil, l, nil)
+ r = conv(r, n.Type) // in case n.Type is named.
+ r = typecheck(r, ctxExpr)
+ r = walkexpr(r, init)
+ n = r
+ } else {
+ // n escapes; set up a call to makeslice.
+ // When len and cap can fit into int, use makeslice instead of
+ // makeslice64, which is faster and shorter on 32 bit platforms.
+
+ len, cap := l, r
+
+ fnname := "makeslice64"
+ argtype := types.Types[TINT64]
+
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makeslice during runtime.
+ if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
+ (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
+ fnname = "makeslice"
+ argtype = types.Types[TINT]
+ }
+
+ m := nod(OSLICEHEADER, nil, nil)
+ m.Type = t
+
+ fn := syslook(fnname)
+ m.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
+ m.Left.MarkNonNil()
+ m.List.Set2(conv(len, types.Types[TINT]), conv(cap, types.Types[TINT]))
+
+ m = typecheck(m, ctxExpr)
+ m = walkexpr(m, init)
+ n = m
+ }
+
+ case OMAKESLICECOPY:
+ if n.Esc == EscNone {
+ Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ }
+
+ t := n.Type
+ if t.Elem().NotInHeap() {
+ yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+
+ length := conv(n.Left, types.Types[TINT])
+ copylen := nod(OLEN, n.Right, nil)
+ copyptr := nod(OSPTR, n.Right, nil)
+
+ if !t.Elem().HasPointers() && n.Bounded() {
+ // When len(to)==len(from) and elements have no pointers:
+ // replace make+copy with runtime.mallocgc+runtime.memmove.
+
+ // We do not check for overflow of len(to)*elem.Width here
+ // since len(from) is an existing checked slice capacity
+ // with same elem.Width for the from slice.
+ size := nod(OMUL, conv(length, types.Types[TUINTPTR]), conv(nodintconst(t.Elem().Width), types.Types[TUINTPTR]))
+
+ // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+ fn := syslook("mallocgc")
+ sh := nod(OSLICEHEADER, nil, nil)
+ sh.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, size, nodnil(), nodbool(false))
+ sh.Left.MarkNonNil()
+ sh.List.Set2(length, length)
+ sh.Type = t
+
+ s := temp(t)
+ r := typecheck(nod(OAS, s, sh), ctxStmt)
+ r = walkexpr(r, init)
+ init.Append(r)
+
+ // instantiate memmove(to *any, frm *any, size uintptr)
+ fn = syslook("memmove")
+ fn = substArgTypes(fn, t.Elem(), t.Elem())
+ ncopy := mkcall1(fn, nil, init, nod(OSPTR, s, nil), copyptr, size)
+ ncopy = typecheck(ncopy, ctxStmt)
+ ncopy = walkexpr(ncopy, init)
+ init.Append(ncopy)
+
+ n = s
+ } else { // Replace make+copy with runtime.makeslicecopy.
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+ fn := syslook("makeslicecopy")
+ s := nod(OSLICEHEADER, nil, nil)
+ s.Left = mkcall1(fn, types.Types[TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[TUNSAFEPTR]))
+ s.Left.MarkNonNil()
+ s.List.Set2(length, length)
+ s.Type = t
+ n = typecheck(s, ctxExpr)
+ n = walkexpr(n, init)
+ }
+
+ case ORUNESTR:
+ a := nodnil()
+ if n.Esc == EscNone {
+ t := types.NewArray(types.Types[TUINT8], 4)
+ a = nod(OADDR, temp(t), nil)
+ }
+ // intstring(*[4]byte, rune)
+ n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
+
+ case OBYTES2STR, ORUNES2STR:
+ a := nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
+ a = nod(OADDR, temp(t), nil)
+ }
+ if n.Op == ORUNES2STR {
+ // slicerunetostring(*[32]byte, []rune) string
+ n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ } else {
+ // slicebytetostring(*[32]byte, ptr *byte, n int) string
+ n.Left = cheapexpr(n.Left, init)
+ ptr, len := n.Left.backingArrayPtrLen()
+ n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
+ }
+
+ case OBYTES2STRTMP:
+ n.Left = walkexpr(n.Left, init)
+ if !instrumenting {
+ // Let the backend handle OBYTES2STRTMP directly
+ // to avoid a function call to slicebytetostringtmp.
+ break
+ }
+ // slicebytetostringtmp(ptr *byte, n int) string
+ n.Left = cheapexpr(n.Left, init)
+ ptr, len := n.Left.backingArrayPtrLen()
+ n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
+
+ case OSTR2BYTES:
+ s := n.Left
+ if Isconst(s, CTSTR) {
+ sc := s.StringVal()
+
+ // Allocate a [n]byte of the right size.
+ t := types.NewArray(types.Types[TUINT8], int64(len(sc)))
+ var a *Node
+ if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
+ a = nod(OADDR, temp(t), nil)
+ } else {
+ a = callnew(t)
+ }
+ p := temp(t.PtrTo()) // *[n]byte
+ init.Append(typecheck(nod(OAS, p, a), ctxStmt))
+
+ // Copy from the static string data to the [n]byte.
+ if len(sc) > 0 {
+ as := nod(OAS,
+ nod(ODEREF, p, nil),
+ nod(ODEREF, convnop(nod(OSPTR, s, nil), t.PtrTo()), nil))
+ as = typecheck(as, ctxStmt)
+ as = walkstmt(as)
+ init.Append(as)
+ }
+
+ // Slice the [n]byte to a []byte.
+ n.Op = OSLICEARR
+ n.Left = p
+ n = walkexpr(n, init)
+ break
+ }
+
+ a := nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
+ a = nod(OADDR, temp(t), nil)
+ }
+ // stringtoslicebyte(*[32]byte, string) []byte
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[TSTRING]))
+
+ case OSTR2BYTESTMP:
+ // []byte(string) conversion that creates a slice
+ // referring to the actual string bytes.
+ // This conversion is handled later by the backend and
+ // is only for use by internal compiler optimizations
+ // that know that the slice won't be mutated.
+ // The only such case today is:
+ // for i, c := range []byte(string)
+ n.Left = walkexpr(n.Left, init)
+
+ case OSTR2RUNES:
+ a := nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
+ a = nod(OADDR, temp(t), nil)
+ }
+ // stringtoslicerune(*[32]rune, string) []rune
+ n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
+
+ case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
+ if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
+ // n can be directly represented in the read-only data section.
+ // Make direct reference to the static data. See issue 12841.
+ vstat := readonlystaticname(n.Type)
+ fixedlit(inInitFunction, initKindStatic, n, vstat, init)
+ n = vstat
+ n = typecheck(n, ctxExpr)
+ break
+ }
+ var_ := temp(n.Type)
+ anylit(n, var_, init)
+ n = var_
+
+ case OSEND:
+ n1 := n.Right
+ n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
+ n1 = walkexpr(n1, init)
+ n1 = nod(OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
+
+ case OCLOSURE:
+ n = walkclosure(n, init)
+
+ case OCALLPART:
+ n = walkpartialcall(n, init)
+ }
+
+ // Expressions that are constant at run time but not
+ // considered const by the language spec are not turned into
+ // constants until walk. For example, if n is y%1 == 0, the
+ // walk of y%1 may have replaced it by 0.
+ // Check whether n with its updated args is itself now a constant.
+ t := n.Type
+ evconst(n)
+ if n.Type != t {
+ Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
+ }
+ if n.Op == OLITERAL {
+ n = typecheck(n, ctxExpr)
+ // Emit string symbol now to avoid emitting
+ // any concurrently during the backend.
+ if s, ok := n.Val().U.(string); ok {
+ _ = stringsym(n.Pos, s)
+ }
+ }
+
+ updateHasCall(n)
+
+ if Debug.w != 0 && n != nil {
+ Dump("after walk expr", n)
+ }
+
+ lineno = lno
+ return n
+}
+
+// markTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+ tsym := typenamesym(t).Linksym()
+ // Emit a marker relocation. The linker will know the type is converted
+ // to an interface if "from" is reachable.
+ r := obj.Addrel(from)
+ r.Sym = tsym
+ r.Type = objabi.R_USEIFACE
+}
+
+// markUsedIfaceMethod marks that an interface method is used in the current
+// function. n is OCALLINTER node.
+func markUsedIfaceMethod(n *Node) {
+ ityp := n.Left.Left.Type
+ tsym := typenamesym(ityp).Linksym()
+ r := obj.Addrel(Curfn.Func.lsym)
+ r.Sym = tsym
+ // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer
+ // in itab).
+ midx := n.Left.Xoffset / int64(Widthptr)
+ r.Add = ifaceMethodOffset(ityp, midx)
+ r.Type = objabi.R_USEIFACEMETHOD
+}
+
+// rtconvfn returns the parameter and result types that will be used by a
+// runtime function to convert from type src to type dst. The runtime function
+// name can be derived from the names of the returned types.
+//
+// If no such function is necessary, it returns (Txxx, Txxx).
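+//
+// For example, on 32-bit ARM a conversion from int64 to float64 returns
+// (TINT64, TFLOAT64); the caller then derives the runtime call name
+// int64tofloat64 from the returned types.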
+func rtconvfn(src, dst *types.Type) (param, result types.EType) {
+ if thearch.SoftFloat {
+ return Txxx, Txxx
+ }
+
+ switch thearch.LinkArch.Family {
+ case sys.ARM, sys.MIPS:
+ if src.IsFloat() {
+ switch dst.Etype {
+ case TINT64, TUINT64:
+ return TFLOAT64, dst.Etype
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Etype {
+ case TINT64, TUINT64:
+ return src.Etype, TFLOAT64
+ }
+ }
+
+ case sys.I386:
+ if src.IsFloat() {
+ switch dst.Etype {
+ case TINT64, TUINT64:
+ return TFLOAT64, dst.Etype
+ case TUINT32, TUINT, TUINTPTR:
+ return TFLOAT64, TUINT32
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Etype {
+ case TINT64, TUINT64:
+ return src.Etype, TFLOAT64
+ case TUINT32, TUINT, TUINTPTR:
+ return TUINT32, TFLOAT64
+ }
+ }
+ }
+ return Txxx, Txxx
+}
+
+// TODO(josharian): combine this with its caller and simplify
+func reduceSlice(n *Node) *Node {
+ low, high, max := n.SliceBounds()
+ if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
+ // Reduce x[i:len(x)] to x[i:].
+ high = nil
+ }
+ n.SetSliceBounds(low, high, max)
+ if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
+ // Reduce x[:] to x.
+ if Debug_slice > 0 {
+ Warn("slice: omit slice operation")
+ }
+ return n.Left
+ }
+ return n
+}
+
+func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
+ // convas will turn map assigns into function calls,
+ // making it impossible for reorder3 to work.
+ n := nod(OAS, l, r)
+
+ if l.Op == OINDEXMAP {
+ return n
+ }
+
+ return convas(n, init)
+}
+
+func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
+ // check assign expression list to
+ // an expression list. called in
+ // expr-list = expr-list
+
+ // ensure order of evaluation for function calls
+ for i := range nl {
+ nl[i] = safeexpr(nl[i], init)
+ }
+ for i1 := range nr {
+ nr[i1] = safeexpr(nr[i1], init)
+ }
+
+ var nn []*Node
+ i := 0
+ for ; i < len(nl); i++ {
+ if i >= len(nr) {
+ break
+ }
+ // Do not generate 'x = x' during return. See issue 4014.
+ if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
+ continue
+ }
+ nn = append(nn, ascompatee1(nl[i], nr[i], init))
+ }
+
+ // cannot happen: caller checked that lists had same length
+ if i < len(nl) || i < len(nr) {
+ var nln, nrn Nodes
+ nln.Set(nl)
+ nrn.Set(nr)
+ Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
+ }
+ return nn
+}
+
+// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
+func fncall(l *Node, rt *types.Type) bool {
+ if l.HasCall() || l.Op == OINDEXMAP {
+ return true
+ }
+ if types.Identical(l.Type, rt) {
+ return false
+ }
+ // There might be a conversion required, which might involve a runtime call.
+ return true
+}
+
+// check assign type list to
+// an expression list. called in
+// expr-list = func()
+func ascompatet(nl Nodes, nr *types.Type) []*Node {
+ if nl.Len() != nr.NumFields() {
+ Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
+ }
+
+ var nn, mm Nodes
+ for i, l := range nl.Slice() {
+ if l.isBlank() {
+ continue
+ }
+ r := nr.Field(i)
+
+ // Any assignment to an lvalue that might cause a function call must be
+ // deferred until all the returned values have been read.
+ if fncall(l, r.Type) {
+ tmp := temp(r.Type)
+ tmp = typecheck(tmp, ctxExpr)
+ a := nod(OAS, l, tmp)
+ a = convas(a, &mm)
+ mm.Append(a)
+ l = tmp
+ }
+
+ res := nod(ORESULT, nil, nil)
+ res.Xoffset = Ctxt.FixedFrameSize() + r.Offset
+ res.Type = r.Type
+ res.SetTypecheck(1)
+
+ a := nod(OAS, l, res)
+ a = convas(a, &nn)
+ updateHasCall(a)
+ if a.HasCall() {
+ Dump("ascompatet ucount", a)
+ Fatalf("ascompatet: too many function calls evaluating parameters")
+ }
+
+ nn.Append(a)
+ }
+ return append(nn.Slice(), mm.Slice()...)
+}
+
+// package all the arguments that match a ... T parameter into a []T.
+func mkdotargslice(typ *types.Type, args []*Node) *Node {
+ var n *Node
+ if len(args) == 0 {
+ n = nodnil()
+ n.Type = typ
+ } else {
+ n = nod(OCOMPLIT, nil, typenod(typ))
+ n.List.Append(args...)
+ n.SetImplicit(true)
+ }
+
+ n = typecheck(n, ctxExpr)
+ if n.Type == nil {
+ Fatalf("mkdotargslice: typecheck failed")
+ }
+ return n
+}
+
+// fixVariadicCall rewrites calls to variadic functions to use an
+// explicit ... argument if one is not already present.
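+//
+// For example, a call f(1, 2, 3) of func f(xs ...int) is rewritten to
+// f([]int{1, 2, 3}...).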
+func fixVariadicCall(call *Node) {
+ fntype := call.Left.Type
+ if !fntype.IsVariadic() || call.IsDDD() {
+ return
+ }
+
+ vi := fntype.NumParams() - 1
+ vt := fntype.Params().Field(vi).Type
+
+ args := call.List.Slice()
+ extra := args[vi:]
+ slice := mkdotargslice(vt, extra)
+ for i := range extra {
+ extra[i] = nil // allow GC
+ }
+
+ call.List.Set(append(args[:vi], slice))
+ call.SetIsDDD(true)
+}
+
+func walkCall(n *Node, init *Nodes) {
+ if n.Rlist.Len() != 0 {
+ return // already walked
+ }
+
+ params := n.Left.Type.Params()
+ args := n.List.Slice()
+
+ n.Left = walkexpr(n.Left, init)
+ walkexprlist(args, init)
+
+ // If this is a method call, add the receiver at the beginning of the args.
+ if n.Op == OCALLMETH {
+ withRecv := make([]*Node, len(args)+1)
+ withRecv[0] = n.Left.Left
+ n.Left.Left = nil
+ copy(withRecv[1:], args)
+ args = withRecv
+ }
+
+ // For any argument whose evaluation might require a function call,
+ // store that argument into a temporary variable,
+ // to prevent later calls from clobbering arguments already on the stack.
+ // When instrumenting, all arguments might require function calls.
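+ // For example, in f(g(), h()) the result of g() is saved in a temporary
+ // before h() runs, so the later call cannot clobber the argument that has
+ // already been evaluated.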
+ var tempAssigns []*Node
+ for i, arg := range args {
+ updateHasCall(arg)
+ // Determine param type.
+ var t *types.Type
+ if n.Op == OCALLMETH {
+ if i == 0 {
+ t = n.Left.Type.Recv().Type
+ } else {
+ t = params.Field(i - 1).Type
+ }
+ } else {
+ t = params.Field(i).Type
+ }
+ if instrumenting || fncall(arg, t) {
+ // make assignment of fncall to tempAt
+ tmp := temp(t)
+ a := nod(OAS, tmp, arg)
+ a = convas(a, init)
+ tempAssigns = append(tempAssigns, a)
+ // replace arg with temp
+ args[i] = tmp
+ }
+ }
+
+ n.List.Set(tempAssigns)
+ n.Rlist.Set(args)
+}
+
+// generate code for print
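+//
+// For example, println("x=", v) with an int v becomes, roughly:
+//	printlock(); printstring("x= "); printint(int64(v)); printnl(); printunlock()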
+func walkprint(nn *Node, init *Nodes) *Node {
+ // Hoist all the argument evaluation up before the lock.
+ walkexprlistcheap(nn.List.Slice(), init)
+
+ // For println, add " " between elements and "\n" at the end.
+ if nn.Op == OPRINTN {
+ s := nn.List.Slice()
+ t := make([]*Node, 0, len(s)*2)
+ for i, n := range s {
+ if i != 0 {
+ t = append(t, nodstr(" "))
+ }
+ t = append(t, n)
+ }
+ t = append(t, nodstr("\n"))
+ nn.List.Set(t)
+ }
+
+ // Collapse runs of constant strings.
+ s := nn.List.Slice()
+ t := make([]*Node, 0, len(s))
+ for i := 0; i < len(s); {
+ var strs []string
+ for i < len(s) && Isconst(s[i], CTSTR) {
+ strs = append(strs, s[i].StringVal())
+ i++
+ }
+ if len(strs) > 0 {
+ t = append(t, nodstr(strings.Join(strs, "")))
+ }
+ if i < len(s) {
+ t = append(t, s[i])
+ i++
+ }
+ }
+ nn.List.Set(t)
+
+ calls := []*Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.List.Slice() {
+ if n.Op == OLITERAL {
+ switch n.Val().Ctype() {
+ case CTRUNE:
+ n = defaultlit(n, types.Runetype)
+
+ case CTINT:
+ n = defaultlit(n, types.Types[TINT64])
+
+ case CTFLT:
+ n = defaultlit(n, types.Types[TFLOAT64])
+ }
+ }
+
+ if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
+ n = defaultlit(n, types.Types[TINT64])
+ }
+ n = defaultlit(n, nil)
+ nn.List.SetIndex(i, n)
+ if n.Type == nil || n.Type.Etype == TFORW {
+ continue
+ }
+
+ var on *Node
+ switch n.Type.Etype {
+ case TINTER:
+ if n.Type.IsEmptyInterface() {
+ on = syslook("printeface")
+ } else {
+ on = syslook("printiface")
+ }
+ on = substArgTypes(on, n.Type) // any-1
+ case TPTR:
+ if n.Type.Elem().NotInHeap() {
+ on = syslook("printuintptr")
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUNSAFEPTR]
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUINTPTR]
+ break
+ }
+ fallthrough
+ case TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+ on = syslook("printpointer")
+ on = substArgTypes(on, n.Type) // any-1
+ case TSLICE:
+ on = syslook("printslice")
+ on = substArgTypes(on, n.Type) // any-1
+ case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
+ if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
+ on = syslook("printhex")
+ } else {
+ on = syslook("printuint")
+ }
+ case TINT, TINT8, TINT16, TINT32, TINT64:
+ on = syslook("printint")
+ case TFLOAT32, TFLOAT64:
+ on = syslook("printfloat")
+ case TCOMPLEX64, TCOMPLEX128:
+ on = syslook("printcomplex")
+ case TBOOL:
+ on = syslook("printbool")
+ case TSTRING:
+ cs := ""
+ if Isconst(n, CTSTR) {
+ cs = n.StringVal()
+ }
+ switch cs {
+ case " ":
+ on = syslook("printsp")
+ case "\n":
+ on = syslook("printnl")
+ default:
+ on = syslook("printstring")
+ }
+ default:
+ badtype(OPRINT, n.Type, nil)
+ continue
+ }
+
+ r := nod(OCALL, on, nil)
+ if params := on.Type.Params().FieldSlice(); len(params) > 0 {
+ t := params[0].Type
+ if !types.Identical(t, n.Type) {
+ n = nod(OCONV, n, nil)
+ n.Type = t
+ }
+ r.List.Append(n)
+ }
+ calls = append(calls, r)
+ }
+
+ calls = append(calls, mkcall("printunlock", nil, init))
+
+ typecheckslice(calls, ctxStmt)
+ walkexprlist(calls, init)
+
+ r := nod(OEMPTY, nil, nil)
+ r = typecheck(r, ctxStmt)
+ r = walkexpr(r, init)
+ r.Ninit.Set(calls)
+ return r
+}
+
+func callnew(t *types.Type) *Node {
+ dowidth(t)
+ n := nod(ONEWOBJ, typename(t), nil)
+ n.Type = types.NewPtr(t)
+ n.SetTypecheck(1)
+ n.MarkNonNil()
+ return n
+}
+
+// isReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func isReflectHeaderDataField(l *Node) bool {
+ if l.Type != types.Types[TUINTPTR] {
+ return false
+ }
+
+ var tsym *types.Sym
+ switch l.Op {
+ case ODOT:
+ tsym = l.Left.Type.Sym
+ case ODOTPTR:
+ tsym = l.Left.Type.Elem().Sym
+ default:
+ return false
+ }
+
+ if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+ return false
+ }
+ return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func convas(n *Node, init *Nodes) *Node {
+ if n.Op != OAS {
+ Fatalf("convas: not OAS %v", n.Op)
+ }
+ defer updateHasCall(n)
+
+ n.SetTypecheck(1)
+
+ if n.Left == nil || n.Right == nil {
+ return n
+ }
+
+ lt := n.Left.Type
+ rt := n.Right.Type
+ if lt == nil || rt == nil {
+ return n
+ }
+
+ if n.Left.isBlank() {
+ n.Right = defaultlit(n.Right, nil)
+ return n
+ }
+
+ if !types.Identical(lt, rt) {
+ n.Right = assignconv(n.Right, lt, "assignment")
+ n.Right = walkexpr(n.Right, init)
+ }
+ dowidth(n.Right.Type)
+
+ return n
+}
+
+// from ascompat[ee]
+// a,b = c,d
+// simultaneous assignment. there cannot
+// be later use of an earlier lvalue.
+//
+// function calls have been removed.
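+// For example, in i, a[i] = 1, 2 the index expression in a[i] is saved to a
+// temporary before i is overwritten, so the second assignment still indexes
+// with the original value of i.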
+func reorder3(all []*Node) []*Node {
+ // If a needed expression may be affected by an
+ // earlier assignment, make an early copy of that
+ // expression and use the copy instead.
+ var early []*Node
+
+ var mapinit Nodes
+ for i, n := range all {
+ l := n.Left
+
+ // Save subexpressions needed on left side.
+ // Drill through non-dereferences.
+ for {
+ if l.Op == ODOT || l.Op == OPAREN {
+ l = l.Left
+ continue
+ }
+
+ if l.Op == OINDEX && l.Left.Type.IsArray() {
+ l.Right = reorder3save(l.Right, all, i, &early)
+ l = l.Left
+ continue
+ }
+
+ break
+ }
+
+ switch l.Op {
+ default:
+ Fatalf("reorder3 unexpected lvalue %#v", l.Op)
+
+ case ONAME:
+ break
+
+ case OINDEX, OINDEXMAP:
+ l.Left = reorder3save(l.Left, all, i, &early)
+ l.Right = reorder3save(l.Right, all, i, &early)
+ if l.Op == OINDEXMAP {
+ all[i] = convas(all[i], &mapinit)
+ }
+
+ case ODEREF, ODOTPTR:
+ l.Left = reorder3save(l.Left, all, i, &early)
+ }
+
+ // Save expression on right side.
+ all[i].Right = reorder3save(all[i].Right, all, i, &early)
+ }
+
+ early = append(mapinit.Slice(), early...)
+ return append(early, all...)
+}
+
+// If the evaluation of n would be affected by the assignments
+// in all up to but not including the ith assignment, copy n
+// into a temporary (appended to early) and return that temporary;
+// otherwise return n unchanged.
+// The result of reorder3save MUST be assigned back to n, e.g.
+// n.Left = reorder3save(n.Left, all, i, early)
+func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
+ if !aliased(n, all[:i]) {
+ return n
+ }
+
+ q := temp(n.Type)
+ q = nod(OAS, q, n)
+ q = typecheck(q, ctxStmt)
+ *early = append(*early, q)
+ return q.Left
+}
+
+// what's the outer value that a write to n affects?
+// outer value means containing struct or array.
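+// For example, a write to x.f[2] (with f an array field) is treated as a
+// write to x, while writes through a pointer dereference or into a slice
+// element are not drilled any further.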
+func outervalue(n *Node) *Node {
+ for {
+ switch n.Op {
+ case OXDOT:
+ Fatalf("OXDOT in walk")
+ case ODOT, OPAREN, OCONVNOP:
+ n = n.Left
+ continue
+ case OINDEX:
+ if n.Left.Type != nil && n.Left.Type.IsArray() {
+ n = n.Left
+ continue
+ }
+ }
+
+ return n
+ }
+}
+
+// Is it possible that the computation of r might be
+// affected by assignments in all?
+func aliased(r *Node, all []*Node) bool {
+ if r == nil {
+ return false
+ }
+
+ // Treat all fields of a struct as referring to the whole struct.
+ // We could do better but we would have to keep track of the fields.
+ for r.Op == ODOT {
+ r = r.Left
+ }
+
+ // Look for obvious aliasing: a variable being assigned
+ // during the all list and appearing in n.
+ // Also record whether there are any writes to addressable
+ // memory (either main memory or variables whose addresses
+ // have been taken).
+ memwrite := false
+ for _, as := range all {
+ // We can ignore assignments to blank.
+ if as.Left.isBlank() {
+ continue
+ }
+
+ l := outervalue(as.Left)
+ if l.Op != ONAME {
+ memwrite = true
+ continue
+ }
+
+ switch l.Class() {
+ default:
+ Fatalf("unexpected class: %v, %v", l, l.Class())
+
+ case PAUTOHEAP, PEXTERN:
+ memwrite = true
+ continue
+
+ case PPARAMOUT:
+ // Assignments to a result parameter in a function with defers
+ // become visible early if the evaluation of any later expression
+ // panics (#43835).
+ if Curfn.Func.HasDefer() {
+ return true
+ }
+ fallthrough
+ case PAUTO, PPARAM:
+ if l.Name.Addrtaken() {
+ memwrite = true
+ continue
+ }
+
+ if vmatch2(l, r) {
+ // Direct hit: l appears in r.
+ return true
+ }
+ }
+ }
+
+ // The variables being written do not appear in r.
+ // However, r might refer to computed addresses
+ // that are being written.
+
+ // If no computed addresses are affected by the writes, no aliasing.
+ if !memwrite {
+ return false
+ }
+
+ // If r does not refer to computed addresses
+ // (that is, if r only refers to variables whose addresses
+ // have not been taken), no aliasing.
+ if varexpr(r) {
+ return false
+ }
+
+ // Otherwise, both the writes and r refer to computed memory addresses.
+ // Assume that they might conflict.
+ return true
+}
+
+// does the evaluation of n only refer to variables
+// whose addresses have not been taken?
+// (and no other memory)
+func varexpr(n *Node) bool {
+ if n == nil {
+ return true
+ }
+
+ switch n.Op {
+ case OLITERAL:
+ return true
+
+ case ONAME:
+ switch n.Class() {
+ case PAUTO, PPARAM, PPARAMOUT:
+ if !n.Name.Addrtaken() {
+ return true
+ }
+ }
+
+ return false
+
+ case OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OMUL,
+ ODIV,
+ OMOD,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ OPLUS,
+ ONEG,
+ OBITNOT,
+ OPAREN,
+ OANDAND,
+ OOROR,
+ OCONV,
+ OCONVNOP,
+ OCONVIFACE,
+ ODOTTYPE:
+ return varexpr(n.Left) && varexpr(n.Right)
+
+ case ODOT: // but not ODOTPTR
+ // Should have been handled in aliased.
+ Fatalf("varexpr unexpected ODOT")
+ }
+
+ // Be conservative.
+ return false
+}
+
+// is the name l mentioned in r?
+func vmatch2(l *Node, r *Node) bool {
+ if r == nil {
+ return false
+ }
+ switch r.Op {
+ // match each right given left
+ case ONAME:
+ return l == r
+
+ case OLITERAL:
+ return false
+ }
+
+ if vmatch2(l, r.Left) {
+ return true
+ }
+ if vmatch2(l, r.Right) {
+ return true
+ }
+ for _, n := range r.List.Slice() {
+ if vmatch2(l, n) {
+ return true
+ }
+ }
+ return false
+}
+
+// is any name mentioned in l also mentioned in r?
+// called by sinit.go
+func vmatch1(l *Node, r *Node) bool {
+ // isolate all left sides
+ if l == nil || r == nil {
+ return false
+ }
+ switch l.Op {
+ case ONAME:
+ switch l.Class() {
+ case PPARAM, PAUTO:
+ break
+
+ default:
+ // assignment to non-stack variable must be
+ // delayed if right has function calls.
+ if r.HasCall() {
+ return true
+ }
+ }
+
+ return vmatch2(l, r)
+
+ case OLITERAL:
+ return false
+ }
+
+ if vmatch1(l.Left, r) {
+ return true
+ }
+ if vmatch1(l.Right, r) {
+ return true
+ }
+ for _, n := range l.List.Slice() {
+ if vmatch1(n, r) {
+ return true
+ }
+ }
+ return false
+}
+
+// paramstoheap returns code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func paramstoheap(params *types.Type) []*Node {
+ var nn []*Node
+ for _, t := range params.Fields().Slice() {
+ v := asNode(t.Nname)
+ if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
+ v = nil
+ }
+ if v == nil {
+ continue
+ }
+
+ if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
+ nn = append(nn, walkstmt(nod(ODCL, v, nil)))
+ if stackcopy.Class() == PPARAM {
+ nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), ctxStmt)))
+ }
+ }
+ }
+
+ return nn
+}
+
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function. Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic. For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
+// The generated code is added to Curfn's Enter list.
+func zeroResults() {
+ for _, f := range Curfn.Type.Results().Fields().Slice() {
+ v := asNode(f.Nname)
+ if v != nil && v.Name.Param.Heapaddr != nil {
+ // The local which points to the return value is the
+ // thing that needs zeroing. This is already handled
+ // by a Needzero annotation in plive.go:livenessepilogue.
+ continue
+ }
+ if v.isParamHeapCopy() {
+ // TODO(josharian/khr): Investigate whether we can switch to "continue" here,
+ // and document more in either case.
+ // In the review of CL 114797, Keith wrote (roughly):
+ // I don't think the zeroing below matters.
+ // The stack return value will never be marked as live anywhere in the function.
+ // It is not written to until deferreturn returns.
+ v = v.Name.Param.Stackcopy
+ }
+ // Zero the stack location containing f.
+ Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, v, nil))
+ }
+}
+
+// returnsfromheap returns code to copy values for heap-escaped parameters
+// back to the stack.
+func returnsfromheap(params *types.Type) []*Node {
+ var nn []*Node
+ for _, t := range params.Fields().Slice() {
+ v := asNode(t.Nname)
+ if v == nil {
+ continue
+ }
+ if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
+ nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), ctxStmt)))
+ }
+ }
+
+ return nn
+}
+
+// heapmoves generates code to handle migrating heap-escaped parameters
+// between the stack and the heap. The generated code is added to Curfn's
+// Enter and Exit lists.
+func heapmoves() {
+ lno := lineno
+ lineno = Curfn.Pos
+ nn := paramstoheap(Curfn.Type.Recvs())
+ nn = append(nn, paramstoheap(Curfn.Type.Params())...)
+ nn = append(nn, paramstoheap(Curfn.Type.Results())...)
+ Curfn.Func.Enter.Append(nn...)
+ lineno = Curfn.Func.Endlineno
+ Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
+ lineno = lno
+}
+
+func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
+ if fn.Type == nil || fn.Type.Etype != TFUNC {
+ Fatalf("mkcall %v %v", fn, fn.Type)
+ }
+
+ n := fn.Type.NumParams()
+ if n != len(va) {
+ Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ }
+
+ r := nod(OCALL, fn, nil)
+ r.List.Set(va)
+ if fn.Type.NumResults() > 0 {
+ r = typecheck(r, ctxExpr|ctxMultiOK)
+ } else {
+ r = typecheck(r, ctxStmt)
+ }
+ r = walkexpr(r, init)
+ r.Type = t
+ return r
+}
+
+func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
+ return vmkcall(syslook(name), t, init, args)
+}
+
+func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
+ return vmkcall(fn, t, init, args)
+}
+
+func conv(n *Node, t *types.Type) *Node {
+ if types.Identical(n.Type, t) {
+ return n
+ }
+ n = nod(OCONV, n, nil)
+ n.Type = t
+ n = typecheck(n, ctxExpr)
+ return n
+}
+
+// convnop converts node n to type t using the OCONVNOP op
+// and typechecks the result with ctxExpr.
+func convnop(n *Node, t *types.Type) *Node {
+ if types.Identical(n.Type, t) {
+ return n
+ }
+ n = nod(OCONVNOP, n, nil)
+ n.Type = t
+ n = typecheck(n, ctxExpr)
+ return n
+}
+
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
+// which is forbidden in user code.
+func byteindex(n *Node) *Node {
+ // We cannot convert from bool to int directly.
+ // While converting from int8 to int is possible, it would yield
+ // the wrong result for negative values.
+ // Reinterpreting the value as an unsigned byte solves both cases.
+ if !types.Identical(n.Type, types.Types[TUINT8]) {
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TUINT8]
+ n.SetTypecheck(1)
+ }
+ n = nod(OCONV, n, nil)
+ n.Type = types.Types[TINT]
+ n.SetTypecheck(1)
+ return n
+}
+
+func chanfn(name string, n int, t *types.Type) *Node {
+ if !t.IsChan() {
+ Fatalf("chanfn %v", t)
+ }
+ fn := syslook(name)
+ switch n {
+ default:
+ Fatalf("chanfn %d", n)
+ case 1:
+ fn = substArgTypes(fn, t.Elem())
+ case 2:
+ fn = substArgTypes(fn, t.Elem(), t.Elem())
+ }
+ return fn
+}
+
+func mapfn(name string, t *types.Type) *Node {
+ if !t.IsMap() {
+ Fatalf("mapfn %v", t)
+ }
+ fn := syslook(name)
+ fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
+ return fn
+}
+
+func mapfndel(name string, t *types.Type) *Node {
+ if !t.IsMap() {
+ Fatalf("mapfn %v", t)
+ }
+ fn := syslook(name)
+ fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
+ return fn
+}
+
+const (
+ mapslow = iota
+ mapfast32
+ mapfast32ptr
+ mapfast64
+ mapfast64ptr
+ mapfaststr
+ nmapfast
+)
+
+type mapnames [nmapfast]string
+
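+// mkmapnames derives the names of the runtime map helpers for one operation.
+// For example, mkmapnames("mapassign", "ptr") yields mapassign,
+// mapassign_fast32, mapassign_fast32ptr, mapassign_fast64,
+// mapassign_fast64ptr, and mapassign_faststr.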
+func mkmapnames(base string, ptr string) mapnames {
+ return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
+}
+
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
+
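+// mapfast selects which of the mapnames variants to use for map type t,
+// based on the key's algorithm kind and whether the key contains pointers.
+// For example, with element types no larger than 128 bytes, map[int64]V
+// selects mapfast64 and map[string]V selects mapfaststr.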
+func mapfast(t *types.Type) int {
+ // Check runtime/map.go:maxElemSize before changing.
+ if t.Elem().Width > 128 {
+ return mapslow
+ }
+ switch algtype(t.Key()) {
+ case AMEM32:
+ if !t.Key().HasPointers() {
+ return mapfast32
+ }
+ if Widthptr == 4 {
+ return mapfast32ptr
+ }
+ Fatalf("small pointer %v", t.Key())
+ case AMEM64:
+ if !t.Key().HasPointers() {
+ return mapfast64
+ }
+ if Widthptr == 8 {
+ return mapfast64ptr
+ }
+ // Two-word object, at least one of which is a pointer.
+ // Use the slow path.
+ case ASTRING:
+ return mapfaststr
+ }
+ return mapslow
+}
+
+func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
+ fn := syslook(name)
+ fn = substArgTypes(fn, l, r)
+ return fn
+}
+
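+// addstr lowers an OADDSTR node (s1 + s2 + ...) to a runtime call:
+// concatstring2 through concatstring5 for up to five operands, or
+// concatstrings with a []string for more. For a non-escaping result it may
+// also pass a small stack buffer that the runtime can use if the result fits.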
+func addstr(n *Node, init *Nodes) *Node {
+ // order.expr rewrote OADDSTR to have a list of strings.
+ c := n.List.Len()
+
+ if c < 2 {
+ Fatalf("addstr count %d too small", c)
+ }
+
+ buf := nodnil()
+ if n.Esc == EscNone {
+ sz := int64(0)
+ for _, n1 := range n.List.Slice() {
+ if n1.Op == OLITERAL {
+ sz += int64(len(n1.StringVal()))
+ }
+ }
+
+ // Don't allocate the buffer if the result won't fit.
+ if sz < tmpstringbufsize {
+ // Create temporary buffer for result string on stack.
+ t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
+ buf = nod(OADDR, temp(t), nil)
+ }
+ }
+
+ // build list of string arguments
+ args := []*Node{buf}
+ for _, n2 := range n.List.Slice() {
+ args = append(args, conv(n2, types.Types[TSTRING]))
+ }
+
+ var fn string
+ if c <= 5 {
+ // small numbers of strings use direct runtime helpers.
+ // note: order.expr knows this cutoff too.
+ fn = fmt.Sprintf("concatstring%d", c)
+ } else {
+ // large numbers of strings are passed to the runtime as a slice.
+ fn = "concatstrings"
+
+ t := types.NewSlice(types.Types[TSTRING])
+ slice := nod(OCOMPLIT, nil, typenod(t))
+ if prealloc[n] != nil {
+ prealloc[slice] = prealloc[n]
+ }
+ slice.List.Set(args[1:]) // skip buf arg
+ args = []*Node{buf, slice}
+ slice.Esc = EscNone
+ }
+
+ cat := syslook(fn)
+ r := nod(OCALL, cat, nil)
+ r.List.Set(args)
+ r = typecheck(r, ctxExpr)
+ r = walkexpr(r, init)
+ r.Type = n.Type
+
+ return r
+}
+
+func walkAppendArgs(n *Node, init *Nodes) {
+ walkexprlistsafe(n.List.Slice(), init)
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ ls := n.List.Slice()
+ for i1, n1 := range ls {
+ ls[i1] = cheapexpr(n1, init)
+ }
+}
+
+// expand append(l1, l2...) to
+// init {
+// s := l1
+// n := len(s) + len(l2)
+// // Compare as uint so growslice can panic on overflow.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(s, n)
+// }
+// s = s[:n]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendslice(n *Node, init *Nodes) *Node {
+ walkAppendArgs(n, init)
+
+ l1 := n.List.First()
+ l2 := n.List.Second()
+ l2 = cheapexpr(l2, init)
+ n.List.SetSecond(l2)
+
+ var nodes Nodes
+
+ // var s []T
+ s := temp(l1.Type)
+ nodes.Append(nod(OAS, s, l1)) // s = l1
+
+ elemtype := s.Type.Elem()
+
+ // n := len(s) + len(l2)
+ nn := temp(types.Types[TINT])
+ nodes.Append(nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
+
+ // if uint(n) > uint(cap(s))
+ nif := nod(OIF, nil, nil)
+ nuint := conv(nn, types.Types[TUINT])
+ scapuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
+ nif.Left = nod(OGT, nuint, scapuint)
+
+ // instantiate growslice(typ *type, []any, int) []any
+ fn := syslook("growslice")
+ fn = substArgTypes(fn, elemtype, elemtype)
+
+ // s = growslice(T, s, n)
+ nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nodes.Append(nif)
+
+ // s = s[:n]
+ nt := nod(OSLICE, s, nil)
+ nt.SetSliceBounds(nil, nn, nil)
+ nt.SetBounded(true)
+ nodes.Append(nod(OAS, s, nt))
+
+ var ncopy *Node
+ if elemtype.HasPointers() {
+ // copy(s[len(l1):], l2)
+ nptr1 := nod(OSLICE, s, nil)
+ nptr1.Type = s.Type
+ nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 = cheapexpr(nptr1, &nodes)
+
+ nptr2 := l2
+
+ Curfn.Func.setWBPos(n.Pos)
+
+ // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+ fn := syslook("typedslicecopy")
+ fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
+ ptr1, len1 := nptr1.backingArrayPtrLen()
+ ptr2, len2 := nptr2.backingArrayPtrLen()
+ ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+ } else if instrumenting && !compiling_runtime {
+ // rely on runtime to instrument:
+ // copy(s[len(l1):], l2)
+ // l2 can be a slice or string.
+ nptr1 := nod(OSLICE, s, nil)
+ nptr1.Type = s.Type
+ nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
+ nptr1 = cheapexpr(nptr1, &nodes)
+ nptr2 := l2
+
+ ptr1, len1 := nptr1.backingArrayPtrLen()
+ ptr2, len2 := nptr2.backingArrayPtrLen()
+
+ fn := syslook("slicecopy")
+ fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
+ ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
+ } else {
+ // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+ nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
+ nptr1.SetBounded(true)
+ nptr1 = nod(OADDR, nptr1, nil)
+
+ nptr2 := nod(OSPTR, l2, nil)
+
+ nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &nodes)
+ nwid = nod(OMUL, nwid, nodintconst(elemtype.Width))
+
+ // instantiate func memmove(to *any, frm *any, length uintptr)
+ fn := syslook("memmove")
+ fn = substArgTypes(fn, elemtype, elemtype)
+ ncopy = mkcall1(fn, nil, &nodes, nptr1, nptr2, nwid)
+ }
+ ln := append(nodes.Slice(), ncopy)
+
+ typecheckslice(ln, ctxStmt)
+ walkstmtlist(ln)
+ init.Append(ln...)
+ return s
+}
+
+// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
+// isAppendOfMake assumes n has already been typechecked.
+func isAppendOfMake(n *Node) bool {
+ if Debug.N != 0 || instrumenting {
+ return false
+ }
+
+ if n.Typecheck() == 0 {
+ Fatalf("missing typecheck: %+v", n)
+ }
+
+ if n.Op != OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
+ return false
+ }
+
+ second := n.List.Second()
+ if second.Op != OMAKESLICE || second.Right != nil {
+ return false
+ }
+
+ // y must be either an integer constant, or a variable whose largest
+ // possible positive value fits into a uint.
+
+ // typecheck made sure that constant arguments to make are not negative and fit into an int.
+
+ // Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
+ y := second.Left
+ if !Isconst(y, CTINT) && maxintval[y.Type.Etype].Cmp(maxintval[TUINT]) > 0 {
+ return false
+ }
+
+ return true
+}
+
+// extendslice rewrites append(l1, make([]T, l2)...) to
+// init {
+// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+// } else {
+// panicmakeslicelen()
+// }
+// s := l1
+// n := len(s) + l2
+// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+// // cap is a positive int and n can become negative when len(s) + l2
+// // overflows int. Interpreting n when negative as uint makes it larger
+// // than cap(s). growslice will check the int n arg and panic if n is
+// // negative. This prevents the overflow from being undetected.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(T, s, n)
+// }
+// s = s[:n]
+// lptr := &l1[0]
+// sptr := &s[0]
+// if lptr == sptr || !T.HasPointers() {
+// // growslice did not clear the whole underlying array (or did not get called)
+// hp := &s[len(l1)]
+// hn := l2 * sizeof(T)
+// memclr(hp, hn)
+// }
+// }
+// s
+func extendslice(n *Node, init *Nodes) *Node {
+ // isAppendOfMake made sure all possible positive values of l2 fit into an uint.
+ // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
+ // check of l2 < 0 at runtime which is generated below.
+ l2 := conv(n.List.Second().Left, types.Types[TINT])
+ l2 = typecheck(l2, ctxExpr)
+ n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
+
+ walkAppendArgs(n, init)
+
+ l1 := n.List.First()
+ l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
+
+ var nodes []*Node
+
+ // if l2 >= 0 (likely happens), do nothing
+ nifneg := nod(OIF, nod(OGE, l2, nodintconst(0)), nil)
+ nifneg.SetLikely(true)
+
+ // else panicmakeslicelen()
+ nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
+ nodes = append(nodes, nifneg)
+
+ // s := l1
+ s := temp(l1.Type)
+ nodes = append(nodes, nod(OAS, s, l1))
+
+ elemtype := s.Type.Elem()
+
+ // n := len(s) + l2
+ nn := temp(types.Types[TINT])
+ nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
+
+ // if uint(n) > uint(cap(s))
+ nuint := conv(nn, types.Types[TUINT])
+ capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
+ nif := nod(OIF, nod(OGT, nuint, capuint), nil)
+
+ // instantiate growslice(typ *type, old []any, newcap int) []any
+ fn := syslook("growslice")
+ fn = substArgTypes(fn, elemtype, elemtype)
+
+ // s = growslice(T, s, n)
+ nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nodes = append(nodes, nif)
+
+ // s = s[:n]
+ nt := nod(OSLICE, s, nil)
+ nt.SetSliceBounds(nil, nn, nil)
+ nt.SetBounded(true)
+ nodes = append(nodes, nod(OAS, s, nt))
+
+ // lptr := &l1[0]
+ l1ptr := temp(l1.Type.Elem().PtrTo())
+ tmp := nod(OSPTR, l1, nil)
+ nodes = append(nodes, nod(OAS, l1ptr, tmp))
+
+ // sptr := &s[0]
+ sptr := temp(elemtype.PtrTo())
+ tmp = nod(OSPTR, s, nil)
+ nodes = append(nodes, nod(OAS, sptr, tmp))
+
+ // hp := &s[len(l1)]
+ hp := nod(OINDEX, s, nod(OLEN, l1, nil))
+ hp.SetBounded(true)
+ hp = nod(OADDR, hp, nil)
+ hp = convnop(hp, types.Types[TUNSAFEPTR])
+
+ // hn := l2 * sizeof(elem(s))
+ hn := nod(OMUL, l2, nodintconst(elemtype.Width))
+ hn = conv(hn, types.Types[TUINTPTR])
+
+ clrname := "memclrNoHeapPointers"
+ hasPointers := elemtype.HasPointers()
+ if hasPointers {
+ clrname = "memclrHasPointers"
+ Curfn.Func.setWBPos(n.Pos)
+ }
+
+ var clr Nodes
+ clrfn := mkcall(clrname, nil, &clr, hp, hn)
+ clr.Append(clrfn)
+
+ if hasPointers {
+ // if l1ptr == sptr
+ nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
+ nifclr.Nbody = clr
+ nodes = append(nodes, nifclr)
+ } else {
+ nodes = append(nodes, clr.Slice()...)
+ }
+
+ typecheckslice(nodes, ctxStmt)
+ walkstmtlist(nodes)
+ init.Append(nodes...)
+ return s
+}
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, len(s)+argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+func walkappend(n *Node, init *Nodes, dst *Node) *Node {
+ if !samesafeexpr(dst, n.List.First()) {
+ n.List.SetFirst(safeexpr(n.List.First(), init))
+ n.List.SetFirst(walkexpr(n.List.First(), init))
+ }
+ walkexprlistsafe(n.List.Slice()[1:], init)
+
+ nsrc := n.List.First()
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ // Using cheapexpr also makes sure that the evaluation
+ // of all arguments (and especially any panics) happens
+ // before we begin to modify the slice in a visible way.
+ ls := n.List.Slice()[1:]
+ for i, n := range ls {
+ n = cheapexpr(n, init)
+ if !types.Identical(n.Type, nsrc.Type.Elem()) {
+ n = assignconv(n, nsrc.Type.Elem(), "append")
+ n = walkexpr(n, init)
+ }
+ ls[i] = n
+ }
+
+ argc := n.List.Len() - 1
+ if argc < 1 {
+ return nsrc
+ }
+
+ // General case, with no function calls left as arguments.
+ // Leave for gen, except that instrumentation requires old form.
+ if !instrumenting || compiling_runtime {
+ return n
+ }
+
+ var l []*Node
+
+ ns := temp(nsrc.Type)
+ l = append(l, nod(OAS, ns, nsrc)) // s = src
+
+ na := nodintconst(int64(argc)) // const argc
+ nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
+
+ fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
+ fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+
+ nx.Nbody.Set1(nod(OAS, ns,
+ mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
+ nod(OADD, nod(OLEN, ns, nil), na))))
+
+ l = append(l, nx)
+
+ nn := temp(types.Types[TINT])
+ l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
+
+ nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
+ nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
+ nx.SetBounded(true)
+ l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
+
+ ls = n.List.Slice()[1:]
+ for i, n := range ls {
+ nx = nod(OINDEX, ns, nn) // s[n] ...
+ nx.SetBounded(true)
+ l = append(l, nod(OAS, nx, n)) // s[n] = arg
+ if i+1 < len(ls) {
+ l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
+ }
+ }
+
+ typecheckslice(l, ctxStmt)
+ walkstmtlist(l)
+ init.Append(l...)
+ return ns
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
+ if n.Left.Type.Elem().HasPointers() {
+ Curfn.Func.setWBPos(n.Pos)
+ fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
+ n.Left = cheapexpr(n.Left, init)
+ ptrL, lenL := n.Left.backingArrayPtrLen()
+ n.Right = cheapexpr(n.Right, init)
+ ptrR, lenR := n.Right.backingArrayPtrLen()
+ return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+ }
+
+ if runtimecall {
+ // rely on runtime to instrument:
+ // copy(n.Left, n.Right)
+ // n.Right can be a slice or string.
+
+ n.Left = cheapexpr(n.Left, init)
+ ptrL, lenL := n.Left.backingArrayPtrLen()
+ n.Right = cheapexpr(n.Right, init)
+ ptrR, lenR := n.Right.backingArrayPtrLen()
+
+ fn := syslook("slicecopy")
+ fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
+
+ return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
+ }
+
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+ nl := temp(n.Left.Type)
+ nr := temp(n.Right.Type)
+ var l []*Node
+ l = append(l, nod(OAS, nl, n.Left))
+ l = append(l, nod(OAS, nr, n.Right))
+
+ nfrm := nod(OSPTR, nr, nil)
+ nto := nod(OSPTR, nl, nil)
+
+ nlen := temp(types.Types[TINT])
+
+ // n = len(to)
+ l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
+
+ // if n > len(frm) { n = len(frm) }
+ nif := nod(OIF, nil, nil)
+
+ nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
+ nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
+ l = append(l, nif)
+
+ // if to.ptr != frm.ptr { memmove( ... ) }
+ ne := nod(OIF, nod(ONE, nto, nfrm), nil)
+ ne.SetLikely(true)
+ l = append(l, ne)
+
+ fn := syslook("memmove")
+ fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
+ nwid := temp(types.Types[TUINTPTR])
+ setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
+ ne.Nbody.Append(setwid)
+ nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
+ call := mkcall1(fn, nil, init, nto, nfrm, nwid)
+ ne.Nbody.Append(call)
+
+ typecheckslice(l, ctxStmt)
+ walkstmtlist(l)
+ init.Append(l...)
+ return nlen
+}
+
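+// eqfor returns a node for the function that compares two values of type t
+// for equality: memequal for plain memory types, or the generated .eq
+// function for composite types that need special handling. needsize reports
+// whether the call must pass an explicit size argument.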
+func eqfor(t *types.Type) (n *Node, needsize bool) {
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled by walkcompare.
+ switch a, _ := algtype1(t); a {
+ case AMEM:
+ n := syslook("memequal")
+ n = substArgTypes(n, t, t)
+ return n, true
+ case ASPECIAL:
+ sym := typesymprefix(".eq", t)
+ n := newname(sym)
+ setNodeNameFunc(n)
+ n.Type = functype(nil, []*Node{
+ anonfield(types.NewPtr(t)),
+ anonfield(types.NewPtr(t)),
+ }, []*Node{
+ anonfield(types.Types[TBOOL]),
+ })
+ return n, false
+ }
+ Fatalf("eqfor %v", t)
+ return nil, false
+}
+
+// The result of walkcompare MUST be assigned back to n, e.g.
+// n.Left = walkcompare(n.Left, init)
+func walkcompare(n *Node, init *Nodes) *Node {
+ if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != OLITERAL && n.Right.Op != OLITERAL {
+ return walkcompareInterface(n, init)
+ }
+
+ if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ return walkcompareString(n, init)
+ }
+
+ n.Left = walkexpr(n.Left, init)
+ n.Right = walkexpr(n.Right, init)
+
+ // Given mixed interface/concrete comparison,
+ // rewrite into types-equal && data-equal.
+ // This is efficient, avoids allocations, and avoids runtime calls.
+ if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
+ // Preserve side-effects in case of short-circuiting; see #32187.
+ l := cheapexpr(n.Left, init)
+ r := cheapexpr(n.Right, init)
+ // Swap so that l is the interface value and r is the concrete value.
+ if n.Right.Type.IsInterface() {
+ l, r = r, l
+ }
+
+ // Handle both == and !=.
+ eq := n.Op
+ andor := OOROR
+ if eq == OEQ {
+ andor = OANDAND
+ }
+ // Check for types equal.
+ // For empty interface, this is:
+ // l.tab == type(r)
+ // For non-empty interface, this is:
+ // l.tab != nil && l.tab._type == type(r)
+ var eqtype *Node
+ tab := nod(OITAB, l, nil)
+ rtyp := typename(r.Type)
+ if l.Type.IsEmptyInterface() {
+ tab.Type = types.NewPtr(types.Types[TUINT8])
+ tab.SetTypecheck(1)
+ eqtype = nod(eq, tab, rtyp)
+ } else {
+ nonnil := nod(brcom(eq), nodnil(), tab)
+ match := nod(eq, itabType(tab), rtyp)
+ eqtype = nod(andor, nonnil, match)
+ }
+ // Check for data equal.
+ eqdata := nod(eq, ifaceData(n.Pos, l, r.Type), r)
+ // Put it all together.
+ expr := nod(andor, eqtype, eqdata)
+ n = finishcompare(n, expr, init)
+ return n
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ // While we're here, decide whether to
+ // inline or call an eq alg.
+ t := n.Left.Type
+ var inline bool
+
+ maxcmpsize := int64(4)
+ unalignedLoad := canMergeLoads()
+ if unalignedLoad {
+ // Keep this low enough to generate less code than a function call.
+ maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
+ }
+
+ switch t.Etype {
+ default:
+ if Debug_libfuzzer != 0 && t.IsInteger() {
+ n.Left = cheapexpr(n.Left, init)
+ n.Right = cheapexpr(n.Right, init)
+
+ // If exactly one comparison operand is
+ // constant, invoke the constcmp functions
+ // instead, and arrange for the constant
+ // operand to be the first argument.
+ l, r := n.Left, n.Right
+ if r.Op == OLITERAL {
+ l, r = r, l
+ }
+ constcmp := l.Op == OLITERAL && r.Op != OLITERAL
+
+ var fn string
+ var paramType *types.Type
+ switch t.Size() {
+ case 1:
+ fn = "libfuzzerTraceCmp1"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp1"
+ }
+ paramType = types.Types[TUINT8]
+ case 2:
+ fn = "libfuzzerTraceCmp2"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp2"
+ }
+ paramType = types.Types[TUINT16]
+ case 4:
+ fn = "libfuzzerTraceCmp4"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp4"
+ }
+ paramType = types.Types[TUINT32]
+ case 8:
+ fn = "libfuzzerTraceCmp8"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp8"
+ }
+ paramType = types.Types[TUINT64]
+ default:
+ Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ }
+ init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
+ }
+ return n
+ case TARRAY:
+ // We can compare several elements at once with 2/4/8 byte integer compares
+ inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
+ case TSTRUCT:
+ inline = t.NumComponents(types.IgnoreBlankFields) <= 4
+ }
+
+ cmpl := n.Left
+ for cmpl != nil && cmpl.Op == OCONVNOP {
+ cmpl = cmpl.Left
+ }
+ cmpr := n.Right
+ for cmpr != nil && cmpr.Op == OCONVNOP {
+ cmpr = cmpr.Left
+ }
+
+ // Chose not to inline. Call equality function directly.
+ if !inline {
+ // eq algs take pointers; cmpl and cmpr must be addressable
+ if !islvalue(cmpl) || !islvalue(cmpr) {
+ Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ }
+
+ fn, needsize := eqfor(t)
+ call := nod(OCALL, fn, nil)
+ call.List.Append(nod(OADDR, cmpl, nil))
+ call.List.Append(nod(OADDR, cmpr, nil))
+ if needsize {
+ call.List.Append(nodintconst(t.Width))
+ }
+ res := call
+ if n.Op != OEQ {
+ res = nod(ONOT, res, nil)
+ }
+ n = finishcompare(n, res, init)
+ return n
+ }
+
+ // inline: build boolean expression comparing element by element
+ andor := OANDAND
+ if n.Op == ONE {
+ andor = OOROR
+ }
+ var expr *Node
+ compare := func(el, er *Node) {
+ a := nod(n.Op, el, er)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = nod(andor, expr, a)
+ }
+ }
+ cmpl = safeexpr(cmpl, init)
+ cmpr = safeexpr(cmpr, init)
+ if t.IsStruct() {
+ for _, f := range t.Fields().Slice() {
+ sym := f.Sym
+ if sym.IsBlank() {
+ continue
+ }
+ compare(
+ nodSym(OXDOT, cmpl, sym),
+ nodSym(OXDOT, cmpr, sym),
+ )
+ }
+ } else {
+ step := int64(1)
+ remains := t.NumElem() * t.Elem().Width
+ combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
+ combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
+ combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
+ for i := int64(0); remains > 0; {
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[TINT64]
+ step = 8 / t.Elem().Width
+ case remains >= 4 && combine32bit:
+ convType = types.Types[TUINT32]
+ step = 4 / t.Elem().Width
+ case remains >= 2 && combine16bit:
+ convType = types.Types[TUINT16]
+ step = 2 / t.Elem().Width
+ default:
+ step = 1
+ }
+ if step == 1 {
+ compare(
+ nod(OINDEX, cmpl, nodintconst(i)),
+ nod(OINDEX, cmpr, nodintconst(i)),
+ )
+ i++
+ remains -= t.Elem().Width
+ } else {
+ elemType := t.Elem().ToUnsigned()
+ cmplw := nod(OINDEX, cmpl, nodintconst(i))
+ cmplw = conv(cmplw, elemType) // convert to unsigned
+ cmplw = conv(cmplw, convType) // widen
+ cmprw := nod(OINDEX, cmpr, nodintconst(i))
+ cmprw = conv(cmprw, elemType)
+ cmprw = conv(cmprw, convType)
+ // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will generate a single large load.
+ for offset := int64(1); offset < step; offset++ {
+ lb := nod(OINDEX, cmpl, nodintconst(i+offset))
+ lb = conv(lb, elemType)
+ lb = conv(lb, convType)
+ lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
+ cmplw = nod(OOR, cmplw, lb)
+ rb := nod(OINDEX, cmpr, nodintconst(i+offset))
+ rb = conv(rb, elemType)
+ rb = conv(rb, convType)
+ rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
+ cmprw = nod(OOR, cmprw, rb)
+ }
+ compare(cmplw, cmprw)
+ i += step
+ remains -= step * t.Elem().Width
+ }
+ }
+ }
+ if expr == nil {
+ expr = nodbool(n.Op == OEQ)
+ // We still need to use cmpl and cmpr, in case they contain
+ // an expression which might panic. See issue 23837.
+ t := temp(cmpl.Type)
+ a1 := nod(OAS, t, cmpl)
+ a1 = typecheck(a1, ctxStmt)
+ a2 := nod(OAS, t, cmpr)
+ a2 = typecheck(a2, ctxStmt)
+ init.Append(a1, a2)
+ }
+ n = finishcompare(n, expr, init)
+ return n
+}
+
+func tracecmpArg(n *Node, t *types.Type, init *Nodes) *Node {
+ // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
+ if n.Op == OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
+ n = copyexpr(n, n.Type, init)
+ }
+
+ return conv(n, t)
+}
+
+func walkcompareInterface(n *Node, init *Nodes) *Node {
+ n.Right = cheapexpr(n.Right, init)
+ n.Left = cheapexpr(n.Left, init)
+ eqtab, eqdata := eqinterface(n.Left, n.Right)
+ var cmp *Node
+ if n.Op == OEQ {
+ cmp = nod(OANDAND, eqtab, eqdata)
+ } else {
+ eqtab.Op = ONE
+ cmp = nod(OOROR, eqtab, nod(ONOT, eqdata, nil))
+ }
+ return finishcompare(n, cmp, init)
+}
+
+func walkcompareString(n *Node, init *Nodes) *Node {
+ // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+ var cs, ncs *Node // const string, non-const string
+ switch {
+ case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
+ // ignore; will be constant evaluated
+ case Isconst(n.Left, CTSTR):
+ cs = n.Left
+ ncs = n.Right
+ case Isconst(n.Right, CTSTR):
+ cs = n.Right
+ ncs = n.Left
+ }
+ if cs != nil {
+ cmp := n.Op
+ // Our comparison below assumes that the non-constant string
+ // is on the left hand side, so rewrite "" cmp x to x cmp "".
+ // See issue 24817.
+ if Isconst(n.Left, CTSTR) {
+ cmp = brrev(cmp)
+ }
+
+ // maxRewriteLen was chosen empirically.
+ // It is the value that minimizes cmd/go file size
+ // across most architectures.
+ // See the commit description for CL 26758 for details.
+ maxRewriteLen := 6
+ // Some architectures can load an unaligned byte sequence as one word.
+ // So we can cover longer strings with the same amount of code.
+ canCombineLoads := canMergeLoads()
+ combine64bit := false
+ if canCombineLoads {
+ // Keep this low enough to generate less code than a function call.
+ maxRewriteLen = 2 * thearch.LinkArch.RegSize
+ combine64bit = thearch.LinkArch.RegSize >= 8
+ }
+
+ var and Op
+ switch cmp {
+ case OEQ:
+ and = OANDAND
+ case ONE:
+ and = OOROR
+ default:
+ // Don't do byte-wise comparisons for <, <=, etc.
+ // They're fairly complicated.
+ // Length-only checks are ok, though.
+ maxRewriteLen = 0
+ }
+ if s := cs.StringVal(); len(s) <= maxRewriteLen {
+ if len(s) > 0 {
+ ncs = safeexpr(ncs, init)
+ }
+ r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
+ remains := len(s)
+ for i := 0; remains > 0; {
+ if remains == 1 || !canCombineLoads {
+ cb := nodintconst(int64(s[i]))
+ ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
+ r = nod(and, r, nod(cmp, ncb, cb))
+ remains--
+ i++
+ continue
+ }
+ var step int
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[TINT64]
+ step = 8
+ case remains >= 4:
+ convType = types.Types[TUINT32]
+ step = 4
+ case remains >= 2:
+ convType = types.Types[TUINT16]
+ step = 2
+ }
+ ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
+ ncsubstr = conv(ncsubstr, convType)
+ csubstr := int64(s[i])
+ // Calculate large constant from bytes as sequence of shifts and ors.
+ // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will combine this into a single large load.
+ for offset := 1; offset < step; offset++ {
+ b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
+ b = conv(b, convType)
+ b = nod(OLSH, b, nodintconst(int64(8*offset)))
+ ncsubstr = nod(OOR, ncsubstr, b)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
+ }
+ csubstrPart := nodintconst(csubstr)
+ // Compare "step" bytes as once
+ r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
+ remains -= step
+ i += step
+ }
+ return finishcompare(n, r, init)
+ }
+ }
+
+ var r *Node
+ if n.Op == OEQ || n.Op == ONE {
+ // prepare for rewrite below
+ n.Left = cheapexpr(n.Left, init)
+ n.Right = cheapexpr(n.Right, init)
+ eqlen, eqmem := eqstring(n.Left, n.Right)
+ // quick check of len before full compare for == or !=.
+ // memequal then tests equality up to length len.
+ if n.Op == OEQ {
+ // len(left) == len(right) && memequal(left, right, len)
+ r = nod(OANDAND, eqlen, eqmem)
+ } else {
+ // len(left) != len(right) || !memequal(left, right, len)
+ eqlen.Op = ONE
+ r = nod(OOROR, eqlen, nod(ONOT, eqmem, nil))
+ }
+ } else {
+ // cmpstring(s1, s2) compared against 0 with the original operator (<, <=, >, >=).
+ r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
+ r = nod(n.Op, r, nodintconst(0))
+ }
+
+ return finishcompare(n, r, init)
+}
+
+// The result of finishcompare MUST be assigned back to n, e.g.
+// n.Left = finishcompare(n.Left, r, init)
+func finishcompare(n, r *Node, init *Nodes) *Node {
+ r = typecheck(r, ctxExpr)
+ r = conv(r, n.Type)
+ r = walkexpr(r, init)
+ return r
+}
+
+// bounded reports whether the integer expression n is known to be in the range [0, max).
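+// For example, x&63 is bounded by any max greater than 63, and an unsigned
+// 8-bit value is bounded by any max of at least 256.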
+func bounded(n *Node, max int64) bool {
+ if n.Type == nil || !n.Type.IsInteger() {
+ return false
+ }
+
+ sign := n.Type.IsSigned()
+ bits := int32(8 * n.Type.Width)
+
+ if smallintconst(n) {
+ v := n.Int64Val()
+ return 0 <= v && v < max
+ }
+
+ switch n.Op {
+ case OAND, OANDNOT:
+ v := int64(-1)
+ switch {
+ case smallintconst(n.Left):
+ v = n.Left.Int64Val()
+ case smallintconst(n.Right):
+ v = n.Right.Int64Val()
+ if n.Op == OANDNOT {
+ v = ^v
+ if !sign {
+ v &= 1<<uint(bits) - 1
+ }
+ }
+ }
+ if 0 <= v && v < max {
+ return true
+ }
+
+ case OMOD:
+ if !sign && smallintconst(n.Right) {
+ v := n.Right.Int64Val()
+ if 0 <= v && v <= max {
+ return true
+ }
+ }
+
+ case ODIV:
+ if !sign && smallintconst(n.Right) {
+ v := n.Right.Int64Val()
+ for bits > 0 && v >= 2 {
+ bits--
+ v >>= 1
+ }
+ }
+
+ case ORSH:
+ if !sign && smallintconst(n.Right) {
+ v := n.Right.Int64Val()
+ if v > int64(bits) {
+ return true
+ }
+ bits -= int32(v)
+ }
+ }
+
+ if !sign && bits <= 62 && 1<<uint(bits) <= max {
+ return true
+ }
+
+ return false
+}
+
+// usemethod checks interface method calls for uses of reflect.Type.Method.
+func usemethod(n *Node) {
+ t := n.Left.Type
+
+ // Looking for either of:
+ // Method(int) reflect.Method
+ // MethodByName(string) (reflect.Method, bool)
+ //
+ // TODO(crawshaw): improve precision of match by working out
+ // how to check the method name.
+ if n := t.NumParams(); n != 1 {
+ return
+ }
+ if n := t.NumResults(); n != 1 && n != 2 {
+ return
+ }
+ p0 := t.Params().Field(0)
+ res0 := t.Results().Field(0)
+ var res1 *types.Field
+ if t.NumResults() == 2 {
+ res1 = t.Results().Field(1)
+ }
+
+ if res1 == nil {
+ if p0.Type.Etype != TINT {
+ return
+ }
+ } else {
+ if !p0.Type.IsString() {
+ return
+ }
+ if !res1.Type.IsBoolean() {
+ return
+ }
+ }
+
+ // Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
+ // Those functions may be alive via the itab, which should not force all
+ // methods to be kept alive. We only want to mark their callers.
+ if myimportpath == "reflect" {
+ switch Curfn.Func.Nname.Sym.Name { // TODO: is there a better way than hardcoding the names?
+ case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+ return
+ }
+ }
+
+ // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
+ // (including global variables such as numImports - was issue #19028).
+ // Also need to check for reflect package itself (see Issue #38515).
+ if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
+ Curfn.Func.SetReflectMethod(true)
+ // The LSym is initialized at this point. We need to set the attribute on the LSym.
+ Curfn.Func.lsym.Set(obj.AttrReflectMethod, true)
+ }
+}
+
+func usefield(n *Node) {
+ if objabi.Fieldtrack_enabled == 0 {
+ return
+ }
+
+ switch n.Op {
+ default:
+ Fatalf("usefield %v", n.Op)
+
+ case ODOT, ODOTPTR:
+ break
+ }
+ if n.Sym == nil {
+ // No field name. This DOTPTR was built by the compiler for access
+ // to runtime data structures. Ignore.
+ return
+ }
+
+ t := n.Left.Type
+ if t.IsPtr() {
+ t = t.Elem()
+ }
+ field := n.Opt().(*types.Field)
+ if field == nil {
+ Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
+ }
+ if field.Sym != n.Sym || field.Offset != n.Xoffset {
+ Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
+ }
+ if !strings.Contains(field.Note, "go:\"track\"") {
+ return
+ }
+
+ outer := n.Left.Type
+ if outer.IsPtr() {
+ outer = outer.Elem()
+ }
+ if outer.Sym == nil {
+ yyerror("tracked field must be in named struct type")
+ }
+ if !types.IsExported(field.Sym.Name) {
+ yyerror("tracked field must be exported (upper case)")
+ }
+
+ sym := tracksym(outer, field)
+ if Curfn.Func.FieldTrack == nil {
+ Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
+ }
+ Curfn.Func.FieldTrack[sym] = struct{}{}
+}
+
+func candiscardlist(l Nodes) bool {
+ for _, n := range l.Slice() {
+ if !candiscard(n) {
+ return false
+ }
+ }
+ return true
+}
+
+func candiscard(n *Node) bool {
+ if n == nil {
+ return true
+ }
+
+ switch n.Op {
+ default:
+ return false
+
+ // Discardable as long as the subpieces are.
+ case ONAME,
+ ONONAME,
+ OTYPE,
+ OPACK,
+ OLITERAL,
+ OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OADDSTR,
+ OADDR,
+ OANDAND,
+ OBYTES2STR,
+ ORUNES2STR,
+ OSTR2BYTES,
+ OSTR2RUNES,
+ OCAP,
+ OCOMPLIT,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OARRAYLIT,
+ OSLICELIT,
+ OPTRLIT,
+ OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ ODOT,
+ OEQ,
+ ONE,
+ OLT,
+ OLE,
+ OGT,
+ OGE,
+ OKEY,
+ OSTRUCTKEY,
+ OLEN,
+ OMUL,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ ONEW,
+ ONOT,
+ OBITNOT,
+ OPLUS,
+ ONEG,
+ OOROR,
+ OPAREN,
+ ORUNESTR,
+ OREAL,
+ OIMAG,
+ OCOMPLEX:
+ break
+
+ // Discardable as long as we know it's not division by zero.
+ case ODIV, OMOD:
+ if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
+ break
+ }
+ if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
+ break
+ }
+ return false
+
+ // Discardable as long as we know it won't fail because of a bad size.
+ case OMAKECHAN, OMAKEMAP:
+ if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
+ break
+ }
+ return false
+
+ // Difficult to tell what sizes are okay.
+ case OMAKESLICE:
+ return false
+
+ case OMAKESLICECOPY:
+ return false
+ }
+
+ if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ return false
+ }
+
+ return true
+}
+
+// Rewrite
+// go builtin(x, y, z)
+// into
+// go func(a1, a2, a3) {
+// builtin(a1, a2, a3)
+// }(x, y, z)
+// for print, println, and delete.
+//
+// Rewrite
+// go f(x, y, uintptr(unsafe.Pointer(z)))
+// into
+// go func(a1, a2, a3) {
+// builtin(a1, a2, uintptr(a3))
+// }(x, y, unsafe.Pointer(z))
+// for functions containing unsafe-uintptr arguments.
+
+var wrapCall_prgen int
+
+// The result of wrapCall MUST be assigned back to n, e.g.
+// n.Left = wrapCall(n.Left, init)
+func wrapCall(n *Node, init *Nodes) *Node {
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
+ }
+
+ isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
+
+ // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
+ if !isBuiltinCall && n.IsDDD() {
+ last := n.List.Len() - 1
+ if va := n.List.Index(last); va.Op == OSLICELIT {
+ n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ n.SetIsDDD(false)
+ }
+ }
+
+ wrapArgs := n.List.Slice()
+ // If there's a receiver argument, it needs to be passed through the wrapper too.
+ if n.Op == OCALLMETH || n.Op == OCALLINTER {
+ recv := n.Left.Left
+ wrapArgs = append([]*Node{recv}, wrapArgs...)
+ }
+
+ // origArgs records which arguments are unsafe.Pointer-to-uintptr conversions, so the conversion can be reapplied inside the wrapper body.
+ origArgs := make([]*Node, len(wrapArgs))
+ t := nod(OTFUNC, nil, nil)
+ for i, arg := range wrapArgs {
+ s := lookupN("a", i)
+ if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
+ origArgs[i] = arg
+ arg = arg.Left
+ wrapArgs[i] = arg
+ }
+ t.List.Append(symfield(s, arg.Type))
+ }
+
+ wrapCall_prgen++
+ sym := lookupN("wrap·", wrapCall_prgen)
+ fn := dclfunc(sym, t)
+
+ args := paramNnames(t.Type)
+ for i, origArg := range origArgs {
+ if origArg == nil {
+ continue
+ }
+ arg := nod(origArg.Op, args[i], nil)
+ arg.Type = origArg.Type
+ args[i] = arg
+ }
+ if n.Op == OCALLMETH || n.Op == OCALLINTER {
+ // Move wrapped receiver argument back to its appropriate place.
+ recv := typecheck(args[0], ctxExpr)
+ n.Left.Left = recv
+ args = args[1:]
+ }
+ call := nod(n.Op, nil, nil)
+ if !isBuiltinCall {
+ call.Op = OCALL
+ call.Left = n.Left
+ call.SetIsDDD(n.IsDDD())
+ }
+ call.List.Set(args)
+ fn.Nbody.Set1(call)
+
+ funcbody()
+
+ fn = typecheck(fn, ctxStmt)
+ typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ xtop = append(xtop, fn)
+
+ call = nod(OCALL, nil, nil)
+ call.Left = fn.Func.Nname
+ call.List.Set(wrapArgs)
+ call = typecheck(call, ctxStmt)
+ call = walkexpr(call, init)
+ return call
+}
+
+// substArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// The result of substArgTypes MUST be assigned back to old, e.g.
+// n.Left = substArgTypes(n.Left, t1, t2)
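+// For example, substArgTypes(syslook("memmove"), t, t) instantiates
+// func memmove(to *any, frm *any, length uintptr) as
+// func(to *t, frm *t, length uintptr).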
+func substArgTypes(old *Node, types_ ...*types.Type) *Node {
+ n := old.copy()
+
+ for _, t := range types_ {
+ dowidth(t)
+ }
+ n.Type = types.SubstAny(n.Type, &types_)
+ if len(types_) > 0 {
+ Fatalf("substArgTypes: too many argument types")
+ }
+ return n
+}
+
+// canMergeLoads reports whether the backend optimization passes for
+// the current architecture can combine adjacent loads into a single
+// larger, possibly unaligned, load. Note that currently the
+// optimizations must be able to handle little endian byte order.
+func canMergeLoads() bool {
+ switch thearch.LinkArch.Family {
+ case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
+ return true
+ case sys.PPC64:
+ // Load combining only supported on ppc64le.
+ return thearch.LinkArch.ByteOrder == binary.LittleEndian
+ }
+ return false
+}
+
+// isRuneCount reports whether n is of the form len([]rune(string)).
+// These are optimized into a call to runtime.countrunes.
+func isRuneCount(n *Node) bool {
+ return Debug.N == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTR2RUNES
+}
+
+func walkCheckPtrAlignment(n *Node, init *Nodes, count *Node) *Node {
+ if !n.Type.IsPtr() {
+ Fatalf("expected pointer type: %v", n.Type)
+ }
+ elem := n.Type.Elem()
+ if count != nil {
+ if !elem.IsArray() {
+ Fatalf("expected array type: %v", elem)
+ }
+ elem = elem.Elem()
+ }
+
+ size := elem.Size()
+ if elem.Alignment() == 1 && (size == 0 || size == 1 && count == nil) {
+ return n
+ }
+
+ if count == nil {
+ count = nodintconst(1)
+ }
+
+ n.Left = cheapexpr(n.Left, init)
+ init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[TUNSAFEPTR]), typename(elem), conv(count, types.Types[TUINTPTR])))
+ return n
+}
+
+var walkCheckPtrArithmeticMarker byte
+
+func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
+ // Calling cheapexpr(n, init) below leads to a recursive call
+ // to walkexpr, which leads us back here again. Use n.Opt to
+ // prevent infinite loops.
+ if opt := n.Opt(); opt == &walkCheckPtrArithmeticMarker {
+ return n
+ } else if opt != nil {
+ // We use n.Opt() here because today it's not used for OCONVNOP. If that changes,
+ // there's no guarantee that temporarily replacing it is safe, so just hard fail here.
+ Fatalf("unexpected Opt: %v", opt)
+ }
+ n.SetOpt(&walkCheckPtrArithmeticMarker)
+ defer n.SetOpt(nil)
+
+ // TODO(mdempsky): Make stricter. We only need to exempt
+ // reflect.Value.Pointer and reflect.Value.UnsafeAddr.
+ switch n.Left.Op {
+ case OCALLFUNC, OCALLMETH, OCALLINTER:
+ return n
+ }
+
+ if n.Left.Op == ODOTPTR && isReflectHeaderDataField(n.Left) {
+ return n
+ }
+
+ // Find original unsafe.Pointer operands involved in this
+ // arithmetic expression.
+ //
+ // "It is valid both to add and to subtract offsets from a
+ // pointer in this way. It is also valid to use &^ to round
+ // pointers, usually for alignment."
+ var originals []*Node
+ var walk func(n *Node)
+ walk = func(n *Node) {
+ switch n.Op {
+ case OADD:
+ walk(n.Left)
+ walk(n.Right)
+ case OSUB, OANDNOT:
+ walk(n.Left)
+ case OCONVNOP:
+ if n.Left.Type.IsUnsafePtr() {
+ n.Left = cheapexpr(n.Left, init)
+ originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
+ }
+ }
+ }
+ walk(n.Left)
+
+ n = cheapexpr(n, init)
+
+ slice := mkdotargslice(types.NewSlice(types.Types[TUNSAFEPTR]), originals)
+ slice.Esc = EscNone
+
+ init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[TUNSAFEPTR]), slice))
+ // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+ // the backing store for multiple calls to checkptrArithmetic.
+
+ return n
+}
+
+// checkPtr reports whether pointer checking should be enabled for
+// function fn at a given level. See debugHelpFooter for defined
+// levels.
+func checkPtr(fn *Node, level int) bool {
+ return Debug_checkptr >= level && fn.Func.Pragma&NoCheckPtr == 0
+}
diff --git a/src/cmd/compile/internal/gc/zerorange_test.go b/src/cmd/compile/internal/gc/zerorange_test.go
new file mode 100644
index 0000000..89f4cb9
--- /dev/null
+++ b/src/cmd/compile/internal/gc/zerorange_test.go
@@ -0,0 +1,98 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "testing"
+)
+
+var glob = 3
+var globp *int64
+
+// Testing compilation of arch.ZeroRange of various sizes.
+
+// By storing a pointer to an int64 output param in a global, the compiler must
+// ensure that output param is allocated on the heap. Also, since there is a
+// defer, the pointer to each output param must be zeroed in the prologue (see
+// plive.go:epilogue()). So, we will get a block of one or more stack slots that
+// need to be zeroed. Hence, we are testing that compilation completes successfully when
+// zerorange calls of various sizes (8-136 bytes) are generated. We are not
+// testing runtime correctness (which is hard to do for the current uses of
+// ZeroRange).
+
+func TestZeroRange(t *testing.T) {
+ testZeroRange8(t)
+ testZeroRange16(t)
+ testZeroRange32(t)
+ testZeroRange64(t)
+ testZeroRange136(t)
+}
+
+func testZeroRange8(t *testing.T) (r int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ return
+}
+
+func testZeroRange16(t *testing.T) (r, s int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ return
+}
+
+func testZeroRange32(t *testing.T) (r, s, t2, u int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ return
+}
+
+func testZeroRange64(t *testing.T) (r, s, t2, u, v, w, x, y int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ return
+}
+
+func testZeroRange136(t *testing.T) (r, s, t2, u, v, w, x, y, r1, s1, t1, u1, v1, w1, x1, y1, z1 int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ globp = &r1
+ globp = &s1
+ globp = &t1
+ globp = &u1
+ globp = &v1
+ globp = &w1
+ globp = &x1
+ globp = &y1
+ globp = &z1
+ return
+}
diff --git a/src/cmd/compile/internal/logopt/escape.go b/src/cmd/compile/internal/logopt/escape.go
new file mode 100644
index 0000000..802f967
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/escape.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package logopt
+
+import "net/url"
+
+func pathEscape(s string) string {
+ return url.PathEscape(s)
+}
diff --git a/src/cmd/compile/internal/logopt/escape_bootstrap.go b/src/cmd/compile/internal/logopt/escape_bootstrap.go
new file mode 100644
index 0000000..66ff0b8
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/escape_bootstrap.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package logopt
+
+// For bootstrapping with an early version of Go
+func pathEscape(s string) string {
+ panic("This should never be called; the compiler is not fully bootstrapped if it is.")
+}
diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go
new file mode 100644
index 0000000..37a049d
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/log_opts.go
@@ -0,0 +1,523 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// This implements (non)optimization logging for the -json option to the Go compiler.
+// The option is -json 0,<destination>.
+//
+// 0 is the version number; to avoid the need for synchronized updates, if
+// new versions of the logging appear, the compiler will support both, for a while,
+// and clients will specify what they need.
+//
+// <destination> is a directory.
+// Directories are specified with a leading / or os.PathSeparator,
+// or more explicitly with file://directory. The second form is intended to
+// deal with corner cases on Windows, and to allow specification of a relative
+// directory path (which is normally a bad idea, because the local directory
+// varies a lot in a build, especially with modules and/or vendoring, and may
+// not be writeable).
+//
+// For each package pkg compiled, a url.PathEscape(pkg)-named subdirectory
+// is created. For each source file.go in that package that generates
+// diagnostics (no diagnostics means no file),
+// a url.PathEscape(file)+".json"-named file is created and contains the
+// logged diagnostics.
+//
+// For example, "cmd%2Finternal%2Fdwarf/%3Cautogenerated%3E.json"
+// for "cmd/internal/dwarf" and <autogenerated> (which is not really a file, but the compiler sees it)
+//
+// If the package string is empty, it is replaced internally with string(0) which encodes to %00.
+//
+// Each log file begins with a JSON record identifying version,
+// platform, and other context, followed by optimization-relevant
+// LSP Diagnostic records, one per line (LSP version 3.15, no difference from 3.14 on the subset used here
+// see https://microsoft.github.io/language-server-protocol/specifications/specification-3-15/ )
+//
+// The fields of a Diagnostic are used in the following way:
+// Range: the outermost source position, for now begin and end are equal.
+// Severity: (always) SeverityInformation (3)
+// Source: (always) "go compiler"
+// Code: a string describing the missed optimization, e.g., "nilcheck", "cannotInline", "isInBounds", "escape"
+// Message: depending on code, additional information, e.g., the reason a function cannot be inlined.
+// RelatedInformation: if the missed optimization actually occurred at a function inlined at Range,
+// then the sequence of inlined locations appears here, from (second) outermost to innermost,
+// each with message="inlineLoc".
+//
+// In the case of escape analysis explanations, after any outer inlining locations,
+// the lines of the explanation appear, each potentially followed with its own inlining
+// location if the escape flow occurred within an inlined function.
+//
+// For example <destination>/cmd%2Fcompile%2Finternal%2Fssa/prove.json
+// might begin with the following line (wrapped for legibility):
+//
+// {"version":0,"package":"cmd/compile/internal/ssa","goos":"darwin","goarch":"amd64",
+// "gc_version":"devel +e1b9a57852 Fri Nov 1 15:07:00 2019 -0400",
+// "file":"/Users/drchase/work/go/src/cmd/compile/internal/ssa/prove.go"}
+//
+// and later contain (also wrapped for legibility):
+//
+// {"range":{"start":{"line":191,"character":24},"end":{"line":191,"character":24}},
+// "severity":3,"code":"nilcheck","source":"go compiler","message":"",
+// "relatedInformation":[
+// {"location":{"uri":"file:///Users/drchase/work/go/src/cmd/compile/internal/ssa/func.go",
+// "range":{"start":{"line":153,"character":16},"end":{"line":153,"character":16}}},
+// "message":"inlineLoc"}]}
+//
+// That is, at prove.go (implicit from context, provided in both filename and header line),
+// line 191, column 24, a nilcheck occurred in the generated code.
+// The relatedInformation indicates that this code actually came from
+// an inlined call to func.go, line 153, character 16.
+//
+// prove.go:191:
+// ft.orderS = f.newPoset()
+// func.go:152 and 153:
+// func (f *Func) newPoset() *poset {
+// if len(f.Cache.scrPoset) > 0 {
+//
+// In the case that the package is empty, the string(0) package name is also used in the header record, for example
+//
+// go tool compile -json=0,file://logopt x.go # no -p option to set the package
+// head -1 logopt/%00/x.json
+// {"version":0,"package":"\u0000","goos":"darwin","goarch":"amd64","gc_version":"devel +86487adf6a Thu Nov 7 19:34:56 2019 -0500","file":"x.go"}
+
+type VersionHeader struct {
+ Version int `json:"version"`
+ Package string `json:"package"`
+ Goos string `json:"goos"`
+ Goarch string `json:"goarch"`
+ GcVersion string `json:"gc_version"`
+ File string `json:"file,omitempty"` // LSP requires an enclosing resource, i.e., a file
+}
+
+// DocumentURI, Position, Range, Location, Diagnostic, DiagnosticRelatedInformation all reuse json definitions from gopls.
+// See https://github.com/golang/tools/blob/22afafe3322a860fcd3d88448768f9db36f8bc5f/internal/lsp/protocol/tsprotocol.go
+
+type DocumentURI string
+
+type Position struct {
+ Line uint `json:"line"` // gopls uses float64, but json output is the same for integers
+ Character uint `json:"character"` // gopls uses float64, but json output is the same for integers
+}
+
+// A Range in a text document expressed as (zero-based) start and end positions.
+// A range is comparable to a selection in an editor. Therefore the end position is exclusive.
+// If you want to specify a range that contains a line including the line ending character(s)
+// then use an end position denoting the start of the next line.
+type Range struct {
+ /*Start defined:
+ * The range's start position
+ */
+ Start Position `json:"start"`
+
+ /*End defined:
+ * The range's end position
+ */
+ End Position `json:"end"` // exclusive
+}
+
+// A Location represents a location inside a resource, such as a line inside a text file.
+type Location struct {
+ // URI is the URI of the resource containing the location.
+ URI DocumentURI `json:"uri"`
+
+ // Range is the range within that resource.
+ Range Range `json:"range"`
+}
+
+/* DiagnosticRelatedInformation defined:
+ * Represents a related message and source code location for a diagnostic. This should be
+ * used to point to code locations that cause or are related to a diagnostic, e.g. when duplicating
+ * a symbol in a scope.
+ */
+type DiagnosticRelatedInformation struct {
+
+ /*Location defined:
+ * The location of this related diagnostic information.
+ */
+ Location Location `json:"location"`
+
+ /*Message defined:
+ * The message of this related diagnostic information.
+ */
+ Message string `json:"message"`
+}
+
+// DiagnosticSeverity defines constants
+type DiagnosticSeverity uint
+
+const (
+ /*SeverityInformation defined:
+ * Reports an informational message.
+ */
+ SeverityInformation DiagnosticSeverity = 3
+)
+
+// DiagnosticTag defines constants
+type DiagnosticTag uint
+
+/*Diagnostic defined:
+ * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
+ * are only valid in the scope of a resource.
+ */
+type Diagnostic struct {
+
+ /*Range defined:
+ * The range at which the message applies
+ */
+ Range Range `json:"range"`
+
+ /*Severity defined:
+ * The diagnostic's severity. Can be omitted. If omitted it is up to the
+ * client to interpret diagnostics as error, warning, info or hint.
+ */
+ Severity DiagnosticSeverity `json:"severity,omitempty"` // always SeverityInformation for optimizer logging.
+
+ /*Code defined:
+ * The diagnostic's code, which usually appears in the user interface.
+ */
+ Code string `json:"code,omitempty"` // LSP uses 'number | string' = gopls interface{}, but only string here, e.g. "boundsCheck", "nilcheck", etc.
+
+ /*Source defined:
+ * A human-readable string describing the source of this
+ * diagnostic, e.g. 'typescript' or 'super lint'. It usually
+ * appears in the user interface.
+ */
+ Source string `json:"source,omitempty"` // "go compiler"
+
+ /*Message defined:
+ * The diagnostic's message. It usually appears in the user interface
+ */
+ Message string `json:"message"` // sometimes used, provides additional information.
+
+ /*Tags defined:
+ * Additional metadata about the diagnostic.
+ */
+ Tags []DiagnosticTag `json:"tags,omitempty"` // always empty for logging optimizations.
+
+ /*RelatedInformation defined:
+ * An array of related diagnostic information, e.g. when symbol-names within
+ * a scope collide all definitions can be marked via this property.
+ */
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+}
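As a concrete illustration of the format described in the header comment above, here is a sketch (editor's addition, not part of this patch) of a standalone tool that reads one per-file log: the first JSON record is the version header, and the remaining records are LSP Diagnostics. The log path and the mirrored struct definitions are assumptions for the example; the authoritative definitions are the VersionHeader and Diagnostic types in this file.

	package main

	import (
		"encoding/json"
		"fmt"
		"log"
		"os"
	)

	// Local mirrors of the VersionHeader and Diagnostic shapes, reduced to
	// the fields this sketch prints.
	type header struct {
		Version int    `json:"version"`
		Package string `json:"package"`
		File    string `json:"file"`
	}

	type position struct {
		Line      uint `json:"line"`
		Character uint `json:"character"`
	}

	type span struct {
		Start position `json:"start"`
		End   position `json:"end"`
	}

	type diag struct {
		Range   span   `json:"range"`
		Code    string `json:"code"`
		Message string `json:"message"`
	}

	func main() {
		// Hypothetical log produced by: go tool compile -json=0,file://logopt ...
		f, err := os.Open("logopt/cmd%2Fcompile%2Finternal%2Fssa/prove.json")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		dec := json.NewDecoder(f)
		var hdr header
		if err := dec.Decode(&hdr); err != nil {
			log.Fatal(err)
		}
		for dec.More() {
			var d diag
			if err := dec.Decode(&d); err != nil {
				log.Fatal(err)
			}
			fmt.Printf("%s:%d:%d %s %s\n", hdr.File, d.Range.Start.Line, d.Range.Start.Character, d.Code, d.Message)
		}
	}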
+
+// A LoggedOpt is what the compiler produces and accumulates,
+// to be converted to JSON for human or IDE consumption.
+type LoggedOpt struct {
+ pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON.
+ compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet)
+ functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet)
+ what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline"
+ target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant.
+}
+
+type logFormat uint8
+
+const (
+ None logFormat = iota
+ Json0 // version 0 for LSP 3.14, 3.15; future versions of LSP may change the format and the compiler may need to support both as clients are updated.
+)
+
+var Format = None
+var dest string
+
+// LogJsonOption parses and validates the version,directory value attached to the -json compiler flag.
+func LogJsonOption(flagValue string) {
+ version, directory := parseLogFlag("json", flagValue)
+ if version != 0 {
+ log.Fatal("-json version must be 0")
+ }
+ dest = checkLogPath(directory)
+ Format = Json0
+}
+
+// parseLogFlag checks the flag passed to -json
+// for version,destination format and returns the two parts.
+func parseLogFlag(flag, value string) (version int, directory string) {
+ if Format != None {
+ log.Fatal("Cannot repeat -json flag")
+ }
+ commaAt := strings.Index(value, ",")
+ if commaAt <= 0 {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number", flag)
+ }
+ v, err := strconv.Atoi(value[:commaAt])
+ if err != nil {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number: err=%v", flag, err)
+ }
+ version = v
+ directory = value[commaAt+1:]
+ return
+}
+
+// isWindowsDriveURI returns true if the file URI is of the format used by
+// Windows URIs. The url.Parse package does not specially handle Windows paths
+// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
+// (copied from tools/internal/span/uri.go)
+// This is less comprehensive than the processing in filepath.IsAbs on Windows.
+func isWindowsDriveURIPath(uri string) bool {
+ if len(uri) < 4 {
+ return false
+ }
+ return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
+}
+
+func parseLogPath(destination string) (string, string) {
+ if filepath.IsAbs(destination) {
+ return filepath.Clean(destination), ""
+ }
+ if strings.HasPrefix(destination, "file://") { // IKWIAD ("I know what I am doing"), or Windows C:\foo\bar\baz
+ uri, err := url.Parse(destination)
+ if err != nil {
+ return "", fmt.Sprintf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err)
+ }
+ destination = uri.Host + uri.Path
+ if isWindowsDriveURIPath(destination) {
+ // strip leading / from /C:
+ // unlike tools/internal/span/uri.go, do not uppercase the drive letter -- let filepath.Clean do what it does.
+ destination = destination[1:]
+ }
+ return filepath.Clean(destination), ""
+ }
+ return "", fmt.Sprintf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, string(filepath.Separator))
+}
+
+// checkLogPath does superficial early checking of the string specifying
+// the directory to which optimizer logging is directed, and if
+// it passes the test, creates the directory and returns the cleaned path.
+func checkLogPath(destination string) string {
+ path, complaint := parseLogPath(destination)
+ if complaint != "" {
+ log.Fatal(complaint)
+ }
+ err := os.MkdirAll(path, 0755)
+ if err != nil {
+ log.Fatalf("optimizer logging destination '<version>,<directory>' but could not create <directory>: err=%v", err)
+ }
+ return path
+}
+
+var loggedOpts []*LoggedOpt
+var mu = sync.Mutex{} // mu protects loggedOpts.
+
+// NewLoggedOpt allocates a new LoggedOpt, to later be passed to either NewLoggedOpt or LogOpt as "args".
+// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
+// funcName is the name of the function.
+// A typical use for this is to accumulate an explanation for a missed optimization, for example, why did something escape?
+func NewLoggedOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt {
+ pass = strings.Replace(pass, " ", "_", -1)
+ return &LoggedOpt{pos, pass, funcName, what, args}
+}
+
+// LogOpt logs information about a (usually missed) optimization performed by the compiler.
+// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
+// funcName is the name of the function.
+func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
+ if Format == None {
+ return
+ }
+ lo := NewLoggedOpt(pos, what, pass, funcName, args...)
+ mu.Lock()
+ defer mu.Unlock()
+ // Because of concurrent calls from the back end, there is no telling what the order will be, but it is stable-sorted by outer Pos before use.
+ loggedOpts = append(loggedOpts, lo)
+}
+
+// Enabled returns whether optimization logging is enabled.
+func Enabled() bool {
+ switch Format {
+ case None:
+ return false
+ case Json0:
+ return true
+ }
+ panic("Unexpected optimizer-logging level")
+}
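For reference, a back-end pass records an entry with the guarded pattern below. This is a sketch excerpted from the mips ssaGenValue nil-check case later in this patch; it is not independently buildable outside cmd/compile, since logopt is an internal package.

	// Inside ssaGenValue, when lowering a nil check (v is an *ssa.Value):
	if logopt.Enabled() {
		logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
	}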
+
+// byPos sorts diagnostics by source position.
+type byPos struct {
+ ctxt *obj.Link
+ a []*LoggedOpt
+}
+
+func (x byPos) Len() int { return len(x.a) }
+func (x byPos) Less(i, j int) bool {
+ return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos))
+}
+func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] }
+
+func writerForLSP(subdirpath, file string) io.WriteCloser {
+ basename := file
+ lastslash := strings.LastIndexAny(basename, "\\/")
+ if lastslash != -1 {
+ basename = basename[lastslash+1:]
+ }
+ lastdot := strings.LastIndex(basename, ".go")
+ if lastdot != -1 {
+ basename = basename[:lastdot]
+ }
+ basename = pathEscape(basename)
+
+ // Assume a directory, make a file
+ p := filepath.Join(subdirpath, basename+".json")
+ w, err := os.Create(p)
+ if err != nil {
+ log.Fatalf("Could not create file %s for logging optimizer actions, %v", p, err)
+ }
+ return w
+}
+
+func fixSlash(f string) string {
+ if os.PathSeparator == '/' {
+ return f
+ }
+ return strings.Replace(f, string(os.PathSeparator), "/", -1)
+}
+
+func uriIfy(f string) DocumentURI {
+ url := url.URL{
+ Scheme: "file",
+ Path: fixSlash(f),
+ }
+ return DocumentURI(url.String())
+}
+
+// uprootedPath returns filename, replacing the first occurrence of $GOROOT with the
+// actual value of GOROOT (because LSP does not speak "$GOROOT").
+func uprootedPath(filename string) string {
+ if !strings.HasPrefix(filename, "$GOROOT/") {
+ return filename
+ }
+ return objabi.GOROOT + filename[len("$GOROOT"):]
+}
+
+// FlushLoggedOpts flushes all the accumulated optimization log entries.
+func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) {
+ if Format == None {
+ return
+ }
+
+ sort.Stable(byPos{ctxt, loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable.
+ switch Format {
+
+ case Json0: // LSP 3.15
+ var posTmp []src.Pos
+ var encoder *json.Encoder
+ var w io.WriteCloser
+
+ if slashPkgPath == "" {
+ slashPkgPath = "\000"
+ }
+ subdirpath := filepath.Join(dest, pathEscape(slashPkgPath))
+ err := os.MkdirAll(subdirpath, 0755)
+ if err != nil {
+ log.Fatalf("Could not create directory %s for logging optimizer actions, %v", subdirpath, err)
+ }
+ diagnostic := Diagnostic{Source: "go compiler", Severity: SeverityInformation}
+
+ // For LSP, make a subdirectory for the package, and for each file foo.go, create foo.json in that subdirectory.
+ currentFile := ""
+ for _, x := range loggedOpts {
+ posTmp, p0 := x.parsePos(ctxt, posTmp)
+ p0f := uprootedPath(p0.Filename())
+
+ if currentFile != p0f {
+ if w != nil {
+ w.Close()
+ }
+ currentFile = p0f
+ w = writerForLSP(subdirpath, currentFile)
+ encoder = json.NewEncoder(w)
+ encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: objabi.GOOS, Goarch: objabi.GOARCH, GcVersion: objabi.Version, File: currentFile})
+ }
+
+ // The first "target" is the most important one.
+ var target string
+ if len(x.target) > 0 {
+ target = fmt.Sprint(x.target[0])
+ }
+
+ diagnostic.Code = x.what
+ diagnostic.Message = target
+ diagnostic.Range = newPointRange(p0)
+ diagnostic.RelatedInformation = diagnostic.RelatedInformation[:0]
+
+ appendInlinedPos(posTmp, &diagnostic)
+
+ // Diagnostic explanation is stored in RelatedInformation after inlining info
+ if len(x.target) > 1 {
+ switch y := x.target[1].(type) {
+ case []*LoggedOpt:
+ for _, z := range y {
+ posTmp, p0 := z.parsePos(ctxt, posTmp)
+ loc := newLocation(p0)
+ msg := z.what
+ if len(z.target) > 0 {
+ msg = msg + ": " + fmt.Sprint(z.target[0])
+ }
+
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: msg})
+ appendInlinedPos(posTmp, &diagnostic)
+ }
+ }
+ }
+
+ encoder.Encode(diagnostic)
+ }
+ if w != nil {
+ w.Close()
+ }
+ }
+}
+
+// newPointRange returns a single-position Range for the compiler source location p.
+func newPointRange(p src.Pos) Range {
+ return Range{Start: Position{p.Line(), p.Col()},
+ End: Position{p.Line(), p.Col()}}
+}
+
+// newLocation returns the Location for the compiler source location p
+func newLocation(p src.Pos) Location {
+ loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newPointRange(p)}
+ return loc
+}
+
+// appendInlinedPos extracts inlining information from posTmp and appends it to diagnostic.
+func appendInlinedPos(posTmp []src.Pos, diagnostic *Diagnostic) {
+ for i := 1; i < len(posTmp); i++ {
+ p := posTmp[i]
+ loc := newLocation(p)
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: "inlineLoc"})
+ }
+}
+
+func (x *LoggedOpt) parsePos(ctxt *obj.Link, posTmp []src.Pos) ([]src.Pos, src.Pos) {
+ posTmp = ctxt.AllPos(x.pos, posTmp)
+ // Reverse posTmp to put outermost first.
+ l := len(posTmp)
+ for i := 0; i < l/2; i++ {
+ posTmp[i], posTmp[l-i-1] = posTmp[l-i-1], posTmp[i]
+ }
+ p0 := posTmp[0]
+ return posTmp, p0
+}
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
new file mode 100644
index 0000000..e121c1a
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+const srcCode = `package x
+type pair struct {a,b int}
+func bar(y *pair) *int {
+ return &y.b
+}
+var a []int
+func foo(w, z *pair) *int {
+ if *bar(w) > 0 {
+ return bar(z)
+ }
+ if a[1] > 0 {
+ a = a[:2]
+ }
+ return &a[0]
+}
+
+// address taking prevents closure inlining
+func n() int {
+ foo := func() int { return 1 }
+ bar := &foo
+ x := (*bar)() + foo()
+ return x
+}
+`
+
+func want(t *testing.T, out string, desired string) {
+ // On Windows, Unicode escapes in the JSON output end up "normalized" elsewhere to /u....,
+ // so "normalize" what we're looking for to match that.
+ s := strings.ReplaceAll(desired, string(os.PathSeparator), "/")
+ if !strings.Contains(out, s) {
+ t.Errorf("did not see phrase %s in \n%s", s, out)
+ }
+}
+
+func wantN(t *testing.T, out string, desired string, n int) {
+ if strings.Count(out, desired) != n {
+ t.Errorf("expected exactly %d occurrences of %s in \n%s", n, desired, out)
+ }
+}
+
+func TestPathStuff(t *testing.T) {
+ sep := string(filepath.Separator)
+ if path, whine := parseLogPath("file:///c:foo"); path != "c:foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("file:///foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if sep == "\\" { // On WINDOWS ONLY
+ if path, whine := parseLogPath("C:/foo"); path != "C:\\foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("c:foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("/foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ } else { // ON UNIX ONLY
+ if path, whine := parseLogPath("/foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ }
+}
+
+func TestLogOpt(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := ioutil.TempDir("", "TestLogOpt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ dir = fixSlash(dir) // Normalize the directory name as much as possible, for Windows testing
+ src := filepath.Join(dir, "file.go")
+ if err := ioutil.WriteFile(src, []byte(srcCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "file.o")
+
+ t.Run("JSON_fails", func(t *testing.T) {
+ // Test malformed flag
+ out, err := testLogOpt(t, "-json=foo", src, outfile)
+ if err == nil {
+ t.Error("-json=foo succeeded unexpectedly")
+ }
+ want(t, out, "option should be")
+ want(t, out, "number")
+
+ // Test a version number that is currently unsupported (and should remain unsupported for a while)
+ out, err = testLogOpt(t, "-json=9,foo", src, outfile)
+ if err == nil {
+ t.Error("-json=0,foo succeeded unexpectedly")
+ }
+ want(t, out, "version must be")
+
+ })
+
+ // replace d (dir) with t ("tmpdir") and convert path separators to '/'
+ normalize := func(out []byte, d, t string) string {
+ s := string(out)
+ s = strings.ReplaceAll(s, d, t)
+ s = strings.ReplaceAll(s, string(os.PathSeparator), "/")
+ return s
+ }
+
+ // Ensure that <128 byte copies are not reported and that 128-byte copies are.
+ // Check at both 1 and 8-byte alignments.
+ t.Run("Copy", func(t *testing.T) {
+ const copyCode = `package x
+func s128a1(x *[128]int8) [128]int8 {
+ return *x
+}
+func s127a1(x *[127]int8) [127]int8 {
+ return *x
+}
+func s16a8(x *[16]int64) [16]int64 {
+ return *x
+}
+func s15a8(x *[15]int64) [15]int64 {
+ return *x
+}
+`
+ copy := filepath.Join(dir, "copy.go")
+ if err := ioutil.WriteFile(copy, []byte(copyCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+ outcopy := filepath.Join(dir, "copy.o")
+
+ // On non-amd64, test the host architecture and OS.
+ arches := []string{runtime.GOARCH}
+ goos0 := runtime.GOOS
+ if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js")
+ arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"}
+ goos0 = "linux"
+ }
+
+ for _, arch := range arches {
+ t.Run(arch, func(t *testing.T) {
+ goos := goos0
+ if arch == "wasm" {
+ goos = "js"
+ }
+ _, err := testCopy(t, dir, arch, goos, copy, outcopy)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := ioutil.ReadFile(filepath.Join(dir, "log", "opt", "x", "copy.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ want(t, slogged, `{"range":{"start":{"line":3,"character":2},"end":{"line":3,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ want(t, slogged, `{"range":{"start":{"line":9,"character":2},"end":{"line":9,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ wantN(t, slogged, `"code":"copy"`, 2)
+ })
+ }
+ })
+
+ // Some architectures don't fault on nil dereference, so nilchecks are eliminated differently.
+ // The N-way copy test also doesn't need to run N-ways N times.
+ if runtime.GOARCH != "amd64" {
+ return
+ }
+
+ t.Run("Success", func(t *testing.T) {
+ // This test is supposed to succeed
+
+ // Note 'file://' is the I-Know-What-I-Am-Doing way of specifying a file, also to deal with corner cases for Windows.
+ _, err := testLogOptDir(t, dir, "-json=0,file://log/opt", src, outfile)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := ioutil.ReadFile(filepath.Join(dir, "log", "opt", "x", "file.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ // All this delicacy with uriIfy and filepath.Join is to get this test to work right on Windows.
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ // below shows proper nilcheck
+ want(t, slogged, `{"range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}},"severity":3,"code":"nilcheck","source":"go compiler","message":"",`+
+ `"relatedInformation":[{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"}]}`)
+ want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
+ want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
+ // escape analysis explanation
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
+ `"relatedInformation":[`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~R0 = y:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u003cN\u003e (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
+ })
+}
+
+func testLogOpt(t *testing.T, flag, src, outfile string) (string, error) {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testLogOptDir(t *testing.T, dir, flag, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p", "x", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testCopy(t *testing.T, dir, goarch, goos, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p", "x", "-json=0,file://log/opt", "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ cmd.Dir = dir
+ cmd.Env = append(os.Environ(), "GOARCH="+goarch, "GOOS="+goos)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go
new file mode 100644
index 0000000..be40c16
--- /dev/null
+++ b/src/cmd/compile/internal/mips/galign.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj/mips"
+ "cmd/internal/objabi"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &mips.Linkmips
+ if objabi.GOARCH == "mipsle" {
+ arch.LinkArch = &mips.Linkmipsle
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = (1 << 31) - 1
+ arch.SoftFloat = (objabi.GOMIPS == "softfloat")
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+ arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
new file mode 100644
index 0000000..5e86772
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// TODO(mips): implement DUFFZERO
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ }
+ } else {
+ //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
+ // ADD $(FIXED_FRAME+frame+lo-4), SP, r1
+ // ADD $cnt, r1, r2
+ // loop:
+ // MOVW R0, (Widthptr)r1
+ // ADD $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+ p1 := p
+ p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R0
+ return p
+}
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
new file mode 100644
index 0000000..9d11c6b
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -0,0 +1,885 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is an FP register
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is HI or LO register
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPSMOVWreg:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVW
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVF
+ if t.Size() == 8 {
+ as = mips.AMOVD
+ }
+ }
+
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPSMOVWnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpMIPSADD,
+ ssa.OpMIPSSUB,
+ ssa.OpMIPSAND,
+ ssa.OpMIPSOR,
+ ssa.OpMIPSXOR,
+ ssa.OpMIPSNOR,
+ ssa.OpMIPSSLL,
+ ssa.OpMIPSSRL,
+ ssa.OpMIPSSRA,
+ ssa.OpMIPSADDF,
+ ssa.OpMIPSADDD,
+ ssa.OpMIPSSUBF,
+ ssa.OpMIPSSUBD,
+ ssa.OpMIPSMULF,
+ ssa.OpMIPSMULD,
+ ssa.OpMIPSDIVF,
+ ssa.OpMIPSDIVD,
+ ssa.OpMIPSMUL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGT,
+ ssa.OpMIPSSGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGTzero,
+ ssa.OpMIPSSGTUzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSADDconst,
+ ssa.OpMIPSSUBconst,
+ ssa.OpMIPSANDconst,
+ ssa.OpMIPSORconst,
+ ssa.OpMIPSXORconst,
+ ssa.OpMIPSNORconst,
+ ssa.OpMIPSSLLconst,
+ ssa.OpMIPSSRLconst,
+ ssa.OpMIPSSRAconst,
+ ssa.OpMIPSSGTconst,
+ ssa.OpMIPSSGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMULT,
+ ssa.OpMIPSMULTU,
+ ssa.OpMIPSDIV,
+ ssa.OpMIPSDIVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPSMOVWconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPSMOVFconst,
+ ssa.OpMIPSMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZ:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZzero:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMPEQF,
+ ssa.OpMIPSCMPEQD,
+ ssa.OpMIPSCMPGEF,
+ ssa.OpMIPSCMPGED,
+ ssa.OpMIPSCMPGTF,
+ ssa.OpMIPSCMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPSMOVWaddr:
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R29)
+ // when constant is large, tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBload,
+ ssa.OpMIPSMOVBUload,
+ ssa.OpMIPSMOVHload,
+ ssa.OpMIPSMOVHUload,
+ ssa.OpMIPSMOVWload,
+ ssa.OpMIPSMOVFload,
+ ssa.OpMIPSMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBstore,
+ ssa.OpMIPSMOVHstore,
+ ssa.OpMIPSMOVWstore,
+ ssa.OpMIPSMOVFstore,
+ ssa.OpMIPSMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBstorezero,
+ ssa.OpMIPSMOVHstorezero,
+ ssa.OpMIPSMOVWstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBreg,
+ ssa.OpMIPSMOVBUreg,
+ ssa.OpMIPSMOVHreg,
+ ssa.OpMIPSMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPSMOVWreg || a.Op == ssa.OpMIPSMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPSMOVWF,
+ ssa.OpMIPSMOVWD,
+ ssa.OpMIPSTRUNCFW,
+ ssa.OpMIPSTRUNCDW,
+ ssa.OpMIPSMOVFD,
+ ssa.OpMIPSMOVDF,
+ ssa.OpMIPSNEGF,
+ ssa.OpMIPSNEGD,
+ ssa.OpMIPSSQRTD,
+ ssa.OpMIPSCLZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSNEG:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredZero:
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p2)
+ case ssa.OpMIPSLoweredMove:
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p6, p2)
+ case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
+ s.Call(v)
+ case ssa.OpMIPSLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpMIPSLoweredPanicBoundsA, ssa.OpMIPSLoweredPanicBoundsB, ssa.OpMIPSLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredAtomicLoad8,
+ ssa.OpMIPSLoweredAtomicLoad32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicLoad8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicLoad32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStore8,
+ ssa.OpMIPSLoweredAtomicStore32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicStore8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicStore32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStorezero:
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicExchange:
+ // SYNC
+ // MOVW Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicAdd:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAddconst:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU $auxInt, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU $auxInt, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAnd,
+ ssa.OpMIPSLoweredAtomicOr:
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = mips.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
+ s.Prog(mips.ASYNC)
+
+ case ssa.OpMIPSLoweredAtomicCas:
+ // MOVW $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(mips.AMOVW)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+
+ p4 := s.Prog(mips.ASC)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p5, p1)
+
+ s.Prog(mips.ASYNC)
+
+ p6 := s.Prog(obj.ANOP)
+ gc.Patch(p2, p6)
+
+ case ssa.OpMIPSLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPSFPFlagTrue,
+ ssa.OpMIPSFPFlagFalse:
+ // MOVW $1, r
+ // CMOVF R0, r
+
+ cmov := mips.ACMOVF
+ if v.Op == ssa.OpMIPSFPFlagFalse {
+ cmov = mips.ACMOVT
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p1 := s.Prog(cmov)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = mips.REGZERO
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg()
+
+ case ssa.OpMIPSLoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPSLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPSEQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPSNE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPSLTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPSGEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPSLEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPSGTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPSFPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+ case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
+ ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
+ ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
+ ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
new file mode 100644
index 0000000..90c381a
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj/mips"
+ "cmd/internal/objabi"
+)
+
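+// Init registers the mips64/mips64le back end with the compiler: it selects
+// the link architecture and fills in the code generation hooks used by gc.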
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &mips.Linkmips64
+ if objabi.GOARCH == "mips64le" {
+ arch.LinkArch = &mips.Linkmips64le
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = 1 << 50
+ arch.SoftFloat = objabi.GOMIPS64 == "softfloat"
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
new file mode 100644
index 0000000..04e7a66
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
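+// zerorange zeroes cnt bytes of the stack frame starting at offset off
+// (addressed as 8+off from SP). Small ranges are unrolled as MOVV stores,
+// medium ranges jump into runtime duffzero, and large ranges use an
+// explicit store loop.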
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*gc.Widthptr) {
+ p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ // ADDV $(8+frame+lo-8), SP, r1
+ // ADDV $cnt, r1, r2
+ // loop:
+ // MOVV R0, (Widthptr)r1
+ // ADDV $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
+ p1 := p
+ p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
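+// ginsnop emits a NOR with the zero register as both source and
+// destination, which serves as a no-op on mips64.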
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R0
+ return p
+}
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
new file mode 100644
index 0000000..2727c4d
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -0,0 +1,846 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is a floating-point register.
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is the HI or LO register.
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return mips.AMOVW
+ } else {
+ return mips.AMOVWU
+ }
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad store type")
+}
+
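+// ssaGenValue emits the machine instructions for a single SSA value v.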
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVV
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPS64MOVVnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpMIPS64ADDV,
+ ssa.OpMIPS64SUBV,
+ ssa.OpMIPS64AND,
+ ssa.OpMIPS64OR,
+ ssa.OpMIPS64XOR,
+ ssa.OpMIPS64NOR,
+ ssa.OpMIPS64SLLV,
+ ssa.OpMIPS64SRLV,
+ ssa.OpMIPS64SRAV,
+ ssa.OpMIPS64ADDF,
+ ssa.OpMIPS64ADDD,
+ ssa.OpMIPS64SUBF,
+ ssa.OpMIPS64SUBD,
+ ssa.OpMIPS64MULF,
+ ssa.OpMIPS64MULD,
+ ssa.OpMIPS64DIVF,
+ ssa.OpMIPS64DIVD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64SGT,
+ ssa.OpMIPS64SGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64ADDVconst,
+ ssa.OpMIPS64SUBVconst,
+ ssa.OpMIPS64ANDconst,
+ ssa.OpMIPS64ORconst,
+ ssa.OpMIPS64XORconst,
+ ssa.OpMIPS64NORconst,
+ ssa.OpMIPS64SLLVconst,
+ ssa.OpMIPS64SRLVconst,
+ ssa.OpMIPS64SRAVconst,
+ ssa.OpMIPS64SGTconst,
+ ssa.OpMIPS64SGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MULV,
+ ssa.OpMIPS64MULVU,
+ ssa.OpMIPS64DIVV,
+ ssa.OpMIPS64DIVVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPS64MOVVconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPS64MOVFconst,
+ ssa.OpMIPS64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64CMPEQF,
+ ssa.OpMIPS64CMPEQD,
+ ssa.OpMIPS64CMPGEF,
+ ssa.OpMIPS64CMPGED,
+ ssa.OpMIPS64CMPGTF,
+ ssa.OpMIPS64CMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPS64MOVVaddr:
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVV $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R29)
+ // when constant is large, tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVV $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBload,
+ ssa.OpMIPS64MOVBUload,
+ ssa.OpMIPS64MOVHload,
+ ssa.OpMIPS64MOVHUload,
+ ssa.OpMIPS64MOVWload,
+ ssa.OpMIPS64MOVWUload,
+ ssa.OpMIPS64MOVVload,
+ ssa.OpMIPS64MOVFload,
+ ssa.OpMIPS64MOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBstore,
+ ssa.OpMIPS64MOVHstore,
+ ssa.OpMIPS64MOVWstore,
+ ssa.OpMIPS64MOVVstore,
+ ssa.OpMIPS64MOVFstore,
+ ssa.OpMIPS64MOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBstorezero,
+ ssa.OpMIPS64MOVHstorezero,
+ ssa.OpMIPS64MOVWstorezero,
+ ssa.OpMIPS64MOVVstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBreg,
+ ssa.OpMIPS64MOVBUreg,
+ ssa.OpMIPS64MOVHreg,
+ ssa.OpMIPS64MOVHUreg,
+ ssa.OpMIPS64MOVWreg,
+ ssa.OpMIPS64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPS64MOVWF,
+ ssa.OpMIPS64MOVWD,
+ ssa.OpMIPS64TRUNCFW,
+ ssa.OpMIPS64TRUNCDW,
+ ssa.OpMIPS64MOVVF,
+ ssa.OpMIPS64MOVVD,
+ ssa.OpMIPS64TRUNCFV,
+ ssa.OpMIPS64TRUNCDV,
+ ssa.OpMIPS64MOVFD,
+ ssa.OpMIPS64MOVDF,
+ ssa.OpMIPS64NEGF,
+ ssa.OpMIPS64NEGD,
+ ssa.OpMIPS64SQRTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64NEGV:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64DUFFZERO:
+ // runtime.duffzero expects start address - 8 in R1
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredZero:
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
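+		// sz is the widest store size that evenly divides AuxInt;
+		// each iteration of the loop below clears sz bytes.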
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDVU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p2)
+ case ssa.OpMIPS64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredMove:
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDVU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p6, p2)
+ case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
+ s.Call(v)
+ case ssa.OpMIPS64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpMIPS64LoweredPanicBoundsA, ssa.OpMIPS64LoweredPanicBoundsB, ssa.OpMIPS64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicLoad8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicLoad32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStore8, ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicStore8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicStore32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64:
+ as := mips.AMOVV
+ if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 {
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64:
+ // SYNC
+ // MOVV Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicExchange32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV $auxint, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV $auxint, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64:
+ // MOVV $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicCas32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+ p4 := s.Prog(sc)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p5, p1)
+ p6 := s.Prog(mips.ASYNC)
+ gc.Patch(p2, p6)
+ case ssa.OpMIPS64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPS64FPFlagTrue,
+ ssa.OpMIPS64FPFlagFalse:
+ // MOVV $0, r
+ // BFPF 2(PC)
+ // MOVV $1, r
+ branch := mips.ABFPF
+ if v.Op == ssa.OpMIPS64FPFlagFalse {
+ branch = mips.ABFPT
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p2 := s.Prog(branch)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = 1
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg()
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ gc.Patch(p2, p4)
+ case ssa.OpMIPS64LoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPS64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPS64EQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPS64NE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+ case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
+ ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
+ ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
+ ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
new file mode 100644
index 0000000..c8ef567
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/objabi"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &ppc64.Linkppc64
+ if objabi.GOARCH == "ppc64le" {
+ arch.LinkArch = &ppc64.Linkppc64le
+ }
+ arch.REGSP = ppc64.REGSP
+ arch.MAXWIDTH = 1 << 60
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnopdefer
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
new file mode 100644
index 0000000..a5a772b
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -0,0 +1,79 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
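+// zerorange zeroes cnt bytes of the stack frame starting at offset off past
+// the fixed frame area (FixedFrameSize+off from SP). Small ranges are
+// unrolled as MOVD stores, medium ranges jump into runtime duffzero, and
+// large ranges use a MOVDU store loop.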
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+off+i)
+ }
+ } else if cnt <= int64(128*gc.Widthptr) {
+ p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p.Reg = ppc64.REGRT1
+ p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+ p1 := p
+ p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
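+// ginsnop emits OR R0, R0, which the PPC64 back end uses as a no-op.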
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ p := pp.Prog(ppc64.AOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+ return p
+}
+
+func ginsnopdefer(pp *gc.Progs) *obj.Prog {
+ // On PPC64 two nops are required in the defer case.
+ //
+ // (see gc/cgen.go, gc/plive.go -- copy of comment below)
+ //
+ // On ppc64, when compiling Go into position
+ // independent code on ppc64le we insert an
+ // instruction to reload the TOC pointer from the
+ // stack as well. See the long comment near
+ // jmpdefer in runtime/asm_ppc64.s for why.
+ // If the MOVD is not needed, insert a hardware NOP
+ // so that the same number of instructions are used
+ // on ppc64 in both shared and non-shared modes.
+
+ ginsnop(pp)
+ if gc.Ctxt.Flag_shared {
+ p := pp.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 24
+ p.From.Reg = ppc64.REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R2
+ return p
+ }
+ return ginsnop(pp)
+}
diff --git a/src/cmd/compile/internal/ppc64/opt.go b/src/cmd/compile/internal/ppc64/opt.go
new file mode 100644
index 0000000..4f81aa9
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+ V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+ V_V = 1 << 1 // xV (affect SO and OV flags)
+)
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
new file mode 100644
index 0000000..3e20c44
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -0,0 +1,1967 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/objabi"
+ "math"
+ "strings"
+)
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ // flive := b.FlagsLiveAtEnd
+ // if b.Control != nil && b.Control.Type.IsFlags() {
+ // flive = true
+ // }
+ // for i := len(b.Values) - 1; i >= 0; i-- {
+ // v := b.Values[i]
+	//		if flive && (v.Op == ssa.OpPPC64MOVDconst) {
+ // // The "mark" is any non-nil Aux value.
+ // v.Aux = v
+ // }
+ // if v.Type.IsFlags() {
+ // flive = false
+ // }
+ // for _, a := range v.Args {
+ // if a.Type.IsFlags() {
+ // flive = true
+ // }
+ // }
+ // }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return ppc64.AMOVB
+ } else {
+ return ppc64.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return ppc64.AMOVH
+ } else {
+ return ppc64.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return ppc64.AMOVW
+ } else {
+ return ppc64.AMOVWZ
+ }
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return ppc64.AMOVB
+ case 2:
+ return ppc64.AMOVH
+ case 4:
+ return ppc64.AMOVW
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ rt := obj.TYPE_REG
+ op := ppc64.AMOVD
+
+ if t.IsFloat() {
+ op = ppc64.AFMOVD
+ }
+ p := s.Prog(op)
+ p.From.Type = rt
+ p.From.Reg = x
+ p.To.Type = rt
+ p.To.Reg = y
+ }
+
+ case ssa.OpPPC64LoweredMuluhilo:
+ // MULHDU Rarg1, Rarg0, Reg0
+ // MULLD Rarg1, Rarg0, Reg1
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(ppc64.AMULHDU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(ppc64.AMULLD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+
+ case ssa.OpPPC64LoweredAdd64Carry:
+ // ADDC Rarg2, -1, Rtmp
+ // ADDE Rarg1, Rarg0, Reg0
+ // ADDZE Rzero, Reg1
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ p := s.Prog(ppc64.AADDC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -1
+ p.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p1 := s.Prog(ppc64.AADDE)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(ppc64.AADDZE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGZERO
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg1()
+
+ case ssa.OpPPC64LoweredAtomicAnd8,
+ ssa.OpPPC64LoweredAtomicAnd32,
+ ssa.OpPPC64LoweredAtomicOr8,
+ ssa.OpPPC64LoweredAtomicOr32:
+ // LWSYNC
+ // LBAR/LWAR (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0)
+ // BNE -3(PC)
+ ld := ppc64.ALBAR
+ st := ppc64.ASTBCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LBAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ // AND/OR reg1,out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = ppc64.REGTMP
+ // STBCCC or STWCCC
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = ppc64.REGTMP
+ // BNE retry
+ p3 := s.Prog(ppc64.ABNE)
+ p3.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p3, p)
+
+ case ssa.OpPPC64LoweredAtomicAdd32,
+ ssa.OpPPC64LoweredAtomicAdd64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC/STWCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // MOVW Rout,Rout (if Add32)
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // ADD reg1,out
+ p1 := s.Prog(ppc64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = out
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+
+ // Ensure a 32 bit result
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ p5 := s.Prog(ppc64.AMOVWZ)
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = out
+ }
+
+ case ssa.OpPPC64LoweredAtomicExchange32,
+ ssa.OpPPC64LoweredAtomicExchange64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+		// STDCCC/STWCCC Rarg1, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicExchange32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // STDCCC or STWCCC
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ // BNE retry
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p2, p)
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+
+ case ssa.OpPPC64LoweredAtomicLoad8,
+ ssa.OpPPC64LoweredAtomicLoad32,
+ ssa.OpPPC64LoweredAtomicLoad64,
+ ssa.OpPPC64LoweredAtomicLoadPtr:
+ // SYNC
+ // MOVB/MOVD/MOVW (Rarg0), Rout
+ // CMP Rout,Rout
+ // BNE 1(PC)
+ // ISYNC
+ ld := ppc64.AMOVD
+ cmp := ppc64.ACMP
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicLoad8:
+ ld = ppc64.AMOVBZ
+ case ssa.OpPPC64LoweredAtomicLoad32:
+ ld = ppc64.AMOVWZ
+ cmp = ppc64.ACMPW
+ }
+ arg0 := v.Args[0].Reg()
+ out := v.Reg0()
+ // SYNC when AuxInt == 1; otherwise, load-acquire
+ if v.AuxInt == 1 {
+ psync := s.Prog(ppc64.ASYNC)
+ psync.To.Type = obj.TYPE_NONE
+ }
+ // Load
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arg0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // CMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ // BNE
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+ gc.Patch(p2, pisync)
+
+ case ssa.OpPPC64LoweredAtomicStore8,
+ ssa.OpPPC64LoweredAtomicStore32,
+ ssa.OpPPC64LoweredAtomicStore64:
+ // SYNC or LWSYNC
+ // MOVB/MOVW/MOVD arg1,(arg0)
+ st := ppc64.AMOVD
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicStore8:
+ st = ppc64.AMOVB
+ case ssa.OpPPC64LoweredAtomicStore32:
+ st = ppc64.AMOVW
+ }
+ arg0 := v.Args[0].Reg()
+ arg1 := v.Args[1].Reg()
+ // If AuxInt == 0, LWSYNC (Store-Release), else SYNC
+ // SYNC
+ syncOp := ppc64.ASYNC
+ if v.AuxInt == 0 {
+ syncOp = ppc64.ALWSYNC
+ }
+ psync := s.Prog(syncOp)
+ psync.To.Type = obj.TYPE_NONE
+ // Store
+ p := s.Prog(st)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arg0
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arg1
+
+ case ssa.OpPPC64LoweredAtomicCas64,
+ ssa.OpPPC64LoweredAtomicCas32:
+ // LWSYNC
+ // loop:
+ // LDAR (Rarg0), MutexHint, Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE fail
+ // STDCCC Rarg2, (Rarg0)
+ // BNE loop
+ // LWSYNC // Only for sequential consistency; not required in CasRel.
+ // MOVD $1, Rout
+ // BR end
+ // fail:
+ // MOVD $0, Rout
+ // end:
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ cmp := ppc64.ACMP
+ if v.Op == ssa.OpPPC64LoweredAtomicCas32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ cmp = ppc64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync1 := s.Prog(ppc64.ALWSYNC)
+ plwsync1.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ // If it is a Compare-and-Swap-Release operation, set the EH field with
+ // the release hint.
+ if v.AuxInt == 0 {
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
+ }
+ // CMP reg1,reg2
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = ppc64.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ // BNE cas_fail
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p)
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
+ // If the operation is a CAS-Release, then synchronization is not necessary.
+ if v.AuxInt != 0 {
+ plwsync2 := s.Prog(ppc64.ALWSYNC)
+ plwsync2.To.Type = obj.TYPE_NONE
+ }
+ // return true
+ p5 := s.Prog(ppc64.AMOVD)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ // BR done
+ p6 := s.Prog(obj.AJMP)
+ p6.To.Type = obj.TYPE_BRANCH
+ // return false
+ p7 := s.Prog(ppc64.AMOVD)
+ p7.From.Type = obj.TYPE_CONST
+ p7.From.Offset = 0
+ p7.To.Type = obj.TYPE_REG
+ p7.To.Reg = out
+ gc.Patch(p2, p7)
+ // done (label)
+ p8 := s.Prog(obj.ANOP)
+ gc.Patch(p6, p8)
+
+ case ssa.OpPPC64LoweredGetClosurePtr:
+ // Closure pointer is R11 (already)
+ gc.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpPPC64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F:
+ // input is already rounded
+
+ case ssa.OpLoadReg:
+ loadOp := loadByType(v.Type)
+ p := s.Prog(loadOp)
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ storeOp := storeByType(v.Type)
+ p := s.Prog(storeOp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+
+ case ssa.OpPPC64DIVD:
+ // For now,
+ //
+ // cmp arg1, -1
+ // be ahead
+ // v = arg0 / arg1
+ // b over
+ // ahead: v = - arg0
+ // over: nop
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ gc.Patch(pbahead, p)
+
+ p = s.Prog(obj.ANOP)
+ gc.Patch(pbover, p)
+
+ case ssa.OpPPC64DIVW:
+ // word-width version of above
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMPW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ gc.Patch(pbahead, p)
+
+ p = s.Prog(obj.ANOP)
+ gc.Patch(pbover, p)
+
+ case ssa.OpPPC64CLRLSLWI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)})
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64CLRLSLDI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)})
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ // Mask has been set as sh
+ case ssa.OpPPC64RLDICL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
+ ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
+ ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
+ ssa.OpPPC64ROTL, ssa.OpPPC64ROTLW,
+ ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
+ ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64FCPSGN,
+ ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64NOR, ssa.OpPPC64XOR, ssa.OpPPC64EQV,
+ ssa.OpPPC64MODUD, ssa.OpPPC64MODSD, ssa.OpPPC64MODUW, ssa.OpPPC64MODSW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ANDCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC:
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP // result is not needed
+
+ case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ // Auxint holds encoded rotate + mask
+ case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
+ rot, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
+
+ // Auxint holds mask
+ case ssa.OpPPC64RLWNM:
+ _, _, _, mask := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(mask)})
+
+ case ssa.OpPPC64MADDLD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+		// r = r1*r2 + r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64FMADD, ssa.OpPPC64FMADDS, ssa.OpPPC64FMSUB, ssa.OpPPC64FMSUBS:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ // r = r1*r2 ± r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r3
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r2})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL,
+ ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW,
+ ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS,
+ ssa.OpPPC64FROUND, ssa.OpPPC64CNTTZW, ssa.OpPPC64CNTTZD:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
+ ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
+ ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64SUBFCconst:
+ p := s.Prog(v.Op.Asm())
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt})
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64ANDCCconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP // discard result
+
+ case ssa.OpPPC64MOVDaddr:
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux in MOVDaddr is of unknown type %T", v.Aux)
+ case nil:
+ // If aux offset and aux int are both 0, and the same
+ // input and output regs are used, no instruction
+ // needs to be generated, since it would just be
+ // addi rx, rx, 0.
+ if v.AuxInt != 0 || v.Args[0].Reg() != v.Reg() {
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ }
+
+ case *obj.LSym, *gc.Node:
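+			// A global symbol or stack object: emit the address and let
+			// AddAux attach the symbol and offset to the operand.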
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ gc.AddAux(&p.From, v)
+
+ }
+
+ case ssa.OpPPC64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
+ // Shift in register to required size
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+
+ case ssa.OpPPC64MOVDload:
+
+ // MOVDload uses a DS instruction which requires the offset value of the data to be a multiple of 4.
+ // For offsets known at compile time, a MOVDload won't be selected, but in the case of a go.string,
+ // the offset is not known until link time. If the load of a go.string uses relocation for the
+ // offset field of the instruction, and if the offset is not aligned to 4, then a link error will occur.
+ // To avoid this problem, the full address of the go.string is computed and loaded into the base register,
+ // and that base register is used for the MOVDload using a 0 offset. This problem can only occur with
+ // go.string types because other types will have proper alignment.
+
+ gostring := false
+ switch n := v.Aux.(type) {
+ case *obj.LSym:
+ gostring = strings.HasPrefix(n.Name, "go.string.")
+ }
+ if gostring {
+ // Generate full addr of the go.string const
+ // including AuxInt
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ // Load go.string using 0 offset
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+ }
+ // Not a go.string, generate a normal load
+ fallthrough
+
+ case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRload, ssa.OpPPC64MOVWBRload, ssa.OpPPC64MOVHBRload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRstore, ssa.OpPPC64MOVWBRstore, ssa.OpPPC64MOVHBRstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx,
+ ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx,
+ ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
+ ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
+ ssa.OpPPC64MOVHBRstoreidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ISEL, ssa.OpPPC64ISELB:
+ // ISEL, ISELB
+ // AuxInt value indicates condition: 0=LT 1=GT 2=EQ 4=GE 5=LE 6=NE
+ // ISEL only accepts 0, 1, 2 condition values but the others can be
+ // achieved by swapping operand order.
+ // arg0 ? arg1 : arg2 with conditions LT, GT, EQ
+ // arg0 ? arg2 : arg1 for conditions GE, LE, NE
+ // ISELB is used when a boolean result is needed, returning 0 or 1
+ p := s.Prog(ppc64.AISEL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ // For ISELB, boolean result 0 or 1. Use R0 for 0 operand to avoid load.
+ r := obj.Addr{Type: obj.TYPE_REG, Reg: ppc64.REG_R0}
+ if v.Op == ssa.OpPPC64ISEL {
+ r.Reg = v.Args[1].Reg()
+ }
+ // AuxInt values 4,5,6 implemented with reverse operand order from 0,1,2
+ if v.AuxInt > 3 {
+ p.Reg = r.Reg
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+ } else {
+ p.Reg = v.Args[0].Reg()
+ p.SetFrom3(r)
+ }
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt & 3
+
+ case ssa.OpPPC64LoweredQuadZero, ssa.OpPPC64LoweredQuadZeroShort:
+		// The LoweredQuadZero ops generate STXV
+		// instructions on power9. The Short variant
+		// is used when no loop is generated.
+
+ // sizes >= 64 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR clears VS32
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/64,REG_TMP
+ // MOVD REG_TMP,CTR
+ // loop:
+ // STXV VS32,0(R20)
+ // STXV VS32,16(R20)
+ // STXV VS32,32(R20)
+ // STXV VS32,48(R20)
+ // ADD $64,R20
+ // BC 16, 0, loop
+
+		// Each loop iteration zeroes 64 bytes; ctr is the iteration count.
+ ctr := v.AuxInt / 64
+
+ // Remainder bytes
+ rem := v.AuxInt % 64
+
+ // Only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Don't generate padding for
+ // loops with few iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // generate 4 STXVs to zero 64 bytes
+ var top *obj.Prog
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 16
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 48
+
+ // Increment address for the
+ // 64 bytes just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 64
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p, top)
+ }
+		// When ctr == 1 the loop was not generated, but
+		// there are still at least 64 bytes to clear, so
+		// add them to the remainder so the code below
+		// clears those bytes.
+ if ctr == 1 {
+ rem += 64
+ }
+
+ // Clear the remainder starting at offset zero
+ offset := int64(0)
+
+ if rem >= 16 && ctr <= 1 {
+ // If the XXLXOR hasn't already been
+ // generated, do it here to initialize
+ // VS32 (V0) to 0.
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+ }
+		// Generate STXVs for each
+		// remaining 32 byte chunk.
+ for rem >= 32 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset + 16
+ offset += 32
+ rem -= 32
+ }
+ // Generate 16 bytes
+ if rem >= 16 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ offset += 16
+ rem -= 16
+ }
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroShort:
+
+ // Unaligned data doesn't hurt performance
+ // for these instructions on power8.
+
+ // For sizes >= 64 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // loop:
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS32,(R31)(R20)
+ // ADD $32,R20
+ // BC 16, 0, loop
+ //
+ // any remainder is done as described below
+
+ // for sizes < 64 bytes, first clear as many doublewords as possible,
+ // then handle the remainder
+ // MOVD R0,(R20)
+ // MOVD R0,8(R20)
+ // .... etc.
+ //
+ // the remainder bytes are cleared using one or more
+ // of the following instructions with the appropriate
+		// offsets, depending on which instructions are needed.
+ //
+ // MOVW R0,n1(R20) 4 bytes
+ // MOVH R0,n2(R20) 2 bytes
+ // MOVB R0,n3(R20) 1 byte
+ //
+ // 7 bytes: MOVW, MOVH, MOVB
+ // 6 bytes: MOVW, MOVH
+ // 5 bytes: MOVW, MOVB
+ // 3 bytes: MOVH, MOVB
+
+ // each loop iteration does 32 bytes
+ ctr := v.AuxInt / 32
+
+ // remainder bytes
+ rem := v.AuxInt % 32
+
+ // only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Set up R31 to hold index value 16
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ // Don't add padding for alignment
+ // with few loop iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+			// Generate 2 STXVD2Xs, each storing 16 bytes (32 bytes per iteration).
+			// When this is a loop, the top must be saved.
+ var top *obj.Prog
+ // This is the top of loop
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGZERO
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGTMP
+
+ // Increment address for the
+ // 4 doublewords just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p, top)
+ }
+
+		// When ctr == 1 the loop was not generated, but
+		// there are still at least 32 bytes to clear, so
+		// add them to the remainder so the code below
+		// clears those bytes.
+ if ctr == 1 {
+ rem += 32
+ }
+
+ // clear the remainder starting at offset zero
+ offset := int64(0)
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveShort:
+
+ bytesPerLoop := int64(32)
+ // This will be used when moving more
+ // than 8 bytes. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. This will
+ // work and be efficient for power8 or later.
+ // If there are 64 or more bytes, then a
+ // loop is generated to move 32 bytes and
+ // update the src and dst addresses on each
+ // iteration. When < 64 bytes, the appropriate
+ // number of moves are generated based on the
+ // size.
+ // When moving >= 64 bytes a loop is used
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // top:
+ // LXVD2X (R0)(R21),VS32
+ // LXVD2X (R31)(R21),VS33
+ // ADD $32,R21
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS33,(R31)(R20)
+ // ADD $32,R20
+ // BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+ // Each loop iteration moves 32 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+		// The set of registers used here must match the clobbered reg list
+ // in PPC64Ops.go.
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+ // Only generate looping code when loop counter is > 1 for >= 64 bytes
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Use REGTMP as index reg
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+			// Don't add padding for
+ // alignment with small iteration
+ // counts.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (16)
+ // on the second one.
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p, top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+ // No loop was generated for one iteration, so
+ // add 32 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (value 16)
+ // on the second one.
+ p := s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ offset = 16
+ rem -= 16
+
+ if rem >= 16 {
+ // Use REGTMP as index reg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ offset = 32
+ rem -= 16
+ }
+ }
+
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredQuadMove, ssa.OpPPC64LoweredQuadMoveShort:
+ bytesPerLoop := int64(64)
+ // This is used when moving more
+ // than 8 bytes on power9. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. This will
+		// work and be efficient for power9 or later.
+		// If there is more than one 64 byte chunk, a
+		// loop is generated to move 64 bytes and
+		// update the src and dst addresses on each
+		// iteration. Otherwise, the appropriate
+		// number of moves are generated based on the
+		// size.
+		// When a loop is used it looks like:
+		//      MOVD len/64,REG_TMP
+		//      MOVD REG_TMP,CTR
+		// top:
+		//      LXV 0(R21),VS32
+		//      LXV 16(R21),VS33
+		//      STXV VS32,0(R20)
+		//      STXV VS33,16(R20)
+		//      LXV 32(R21),VS32
+		//      LXV 48(R21),VS33
+		//      STXV VS32,32(R20)
+		//      STXV VS33,48(R20)
+		//      ADD $64,R21
+		//      ADD $64,R20
+		//      BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+		// Each loop iteration moves 64 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+
+		// Only generate looping code when the loop counter is > 1 (size >= 128 bytes)
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 48
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 48
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p, top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+		// No loop was generated for one iteration, so
+		// add 64 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+ if rem >= 32 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = 16
+
+ offset = 32
+ rem -= 32
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+
+ if rem >= 16 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+ }
+ }
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64CALLstatic:
+ s.Call(v)
+
+ case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
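+		// Indirect calls: the function address (Args[0], which must
+		// already be in R12) is copied to LR and the call goes through LR.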
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_LR
+
+ if v.Args[0].Reg() != ppc64.REG_R12 {
+ v.Fatalf("Function address for %v should be in R12 %d but is in %d", v.LongString(), ppc64.REG_R12, p.From.Reg)
+ }
+
+ pp := s.Call(v)
+ pp.To.Reg = ppc64.REG_LR
+
+ // Insert a hint this is not a subroutine return.
+ pp.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 1})
+
+ if gc.Ctxt.Flag_shared {
+ // When compiling Go into PIC, the function we just
+ // called via pointer might have been implemented in
+ // a separate module and so overwritten the TOC
+ // pointer in R2; reload it.
+ q := s.Prog(ppc64.AMOVD)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Offset = 24
+ q.From.Reg = ppc64.REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = ppc64.REG_R2
+ }
+
+ case ssa.OpPPC64LoweredWB:
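+		// The write barrier is a call to the runtime routine recorded in Aux.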
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
+ case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpPPC64LoweredNilCheck:
+ if objabi.GOOS == "aix" {
+ // CMP Rarg0, R0
+ // BNE 2(PC)
+ // STW R0, 0(R0)
+ // NOP (so the BNE has somewhere to land)
+
+ // CMP Rarg0, R0
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ // BNE 2(PC)
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+
+ // STW R0, 0(R0)
+ // Write at 0 is forbidden and will trigger a SIGSEGV
+ p = s.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = ppc64.REG_R0
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ gc.Patch(p2, nop)
+
+ } else {
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(ppc64.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+
+ // These should be resolved by rules and not make it here.
+ case ssa.OpPPC64Equal, ssa.OpPPC64NotEqual, ssa.OpPPC64LessThan, ssa.OpPPC64FLessThan,
+ ssa.OpPPC64LessEqual, ssa.OpPPC64GreaterThan, ssa.OpPPC64FGreaterThan, ssa.OpPPC64GreaterEqual,
+ ssa.OpPPC64FLessEqual, ssa.OpPPC64FGreaterEqual:
+ v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
+ case ssa.OpPPC64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
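+// blockJump maps a conditional block kind to its branch instruction and
+// the branch for the inverted condition. asmeq marks conditions that
+// need an extra BEQ to the same target (FP GE/LE include equality);
+// invasmun marks inverted branches that need an extra BVS for the
+// floating-point unordered case.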
+var blockJump = [...]struct {
+ asm, invasm obj.As
+ asmeq, invasmun bool
+}{
+ ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
+ ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},
+
+ ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
+ ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
+ ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},
+
+ // TODO: need to work FP comparisons into block jumps
+ ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN
+ ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN
+ ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R3
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ p = s.Prog(ppc64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
+ ssa.BlockPPC64LT, ssa.BlockPPC64GE,
+ ssa.BlockPPC64LE, ssa.BlockPPC64GT,
+ ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
+ ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
new file mode 100644
index 0000000..4db0fac
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/riscv"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &riscv.LinkRISCV64
+
+ arch.REGSP = riscv.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+ arch.ZeroRange = zeroRange
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
new file mode 100644
index 0000000..f7c03fe
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += gc.Ctxt.FixedFrameSize()
+
+ if cnt < int64(4*gc.Widthptr) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
+ p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ }
+ return p
+ }
+
+ if cnt <= int64(128*gc.Widthptr) {
+ p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
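+		// The offset jumps past the stores that are not needed; this assumes
+		// each of duffzero's 128 zeroing blocks occupies 8 bytes of code.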
+ p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
+ return p
+ }
+
+ // Loop, zeroing pointer width bytes at a time.
+ // ADD $(off), SP, T0
+ // ADD $(cnt), T0, T1
+ // loop:
+ // MOV ZERO, (T0)
+ // ADD $Widthptr, T0
+ // BNE T0, T1, loop
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p.Reg = riscv.REG_T0
+ p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ loop := p
+ p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = riscv.REG_T1
+ gc.Patch(p, loop)
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go
new file mode 100644
index 0000000..d40bdf7
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/gsubr.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ // Hardware nop is ADD $0, ZERO
+ p := pp.Prog(riscv.AADD)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = riscv.REG_ZERO
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO}
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
new file mode 100644
index 0000000..0beb5b4
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -0,0 +1,720 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+var ssaRegToReg = []int16{
+ riscv.REG_X0,
+ // X1 (LR): unused
+ riscv.REG_X2,
+ riscv.REG_X3,
+ riscv.REG_X4,
+ riscv.REG_X5,
+ riscv.REG_X6,
+ riscv.REG_X7,
+ riscv.REG_X8,
+ riscv.REG_X9,
+ riscv.REG_X10,
+ riscv.REG_X11,
+ riscv.REG_X12,
+ riscv.REG_X13,
+ riscv.REG_X14,
+ riscv.REG_X15,
+ riscv.REG_X16,
+ riscv.REG_X17,
+ riscv.REG_X18,
+ riscv.REG_X19,
+ riscv.REG_X20,
+ riscv.REG_X21,
+ riscv.REG_X22,
+ riscv.REG_X23,
+ riscv.REG_X24,
+ riscv.REG_X25,
+ riscv.REG_X26,
+ riscv.REG_X27,
+ riscv.REG_X28,
+ riscv.REG_X29,
+ riscv.REG_X30,
+ riscv.REG_X31,
+ riscv.REG_F0,
+ riscv.REG_F1,
+ riscv.REG_F2,
+ riscv.REG_F3,
+ riscv.REG_F4,
+ riscv.REG_F5,
+ riscv.REG_F6,
+ riscv.REG_F7,
+ riscv.REG_F8,
+ riscv.REG_F9,
+ riscv.REG_F10,
+ riscv.REG_F11,
+ riscv.REG_F12,
+ riscv.REG_F13,
+ riscv.REG_F14,
+ riscv.REG_F15,
+ riscv.REG_F16,
+ riscv.REG_F17,
+ riscv.REG_F18,
+ riscv.REG_F19,
+ riscv.REG_F20,
+ riscv.REG_F21,
+ riscv.REG_F22,
+ riscv.REG_F23,
+ riscv.REG_F24,
+ riscv.REG_F25,
+ riscv.REG_F26,
+ riscv.REG_F27,
+ riscv.REG_F28,
+ riscv.REG_F29,
+ riscv.REG_F30,
+ riscv.REG_F31,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+func loadByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ gc.Fatalf("unknown float width for load %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ if t.IsSigned() {
+ return riscv.AMOVB
+ } else {
+ return riscv.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return riscv.AMOVH
+ } else {
+ return riscv.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return riscv.AMOVW
+ } else {
+ return riscv.AMOVWU
+ }
+ case 8:
+ return riscv.AMOV
+ default:
+ gc.Fatalf("unknown width for load %d in type %v", width, t)
+ return 0
+ }
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ gc.Fatalf("unknown float width for store %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ return riscv.AMOVB
+ case 2:
+ return riscv.AMOVH
+ case 4:
+ return riscv.AMOVW
+ case 8:
+ return riscv.AMOV
+ default:
+ gc.Fatalf("unknown width for store %d in type %v", width, t)
+ return 0
+ }
+}
+
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return riscv.AMOV, 8
+ case alignment%4 == 0:
+ return riscv.AMOVW, 4
+ case alignment%2 == 0:
+ return riscv.AMOVH, 2
+ default:
+ return riscv.AMOVB, 1
+ }
+}
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+// RISC-V has no flags, so this is a no-op.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ s.SetPos(v.Pos)
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpPhi:
+ gc.CheckLoweredPhi(v)
+ case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if rs == rd {
+ return
+ }
+ as := riscv.AMOV
+ if v.Type.IsFloat() {
+ as = riscv.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64MOVDnop:
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
+ ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
+ a = a.Args[0]
+ }
+ as := v.Op.Asm()
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load and already sign/zero-extended
+ if rs == rd {
+ return
+ }
+ as = riscv.AMOV
+ default:
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
+ ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
+ ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
+ ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
+ ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
+ ssa.OpRISCV64REMUW,
+ ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
+ ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
+ ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
+ ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
+ ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
+ ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
+ ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
+ ssa.OpRISCV64SLTIU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBconst, ssa.OpRISCV64MOVHconst, ssa.OpRISCV64MOVWconst, ssa.OpRISCV64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVaddr:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ wantreg = "SP"
+ gc.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = riscv.REG_SP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
+ ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
+ ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
+ ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
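+		// SEQZ and SNEZ set the destination register to 1 if the source is
+		// zero or non-zero respectively, and to 0 otherwise.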
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
+ s.Call(v)
+ case ssa.OpRISCV64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpRISCV64LoweredAtomicLoad8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVBU)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
+ as := riscv.ALRW
+ if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
+ as = riscv.ALRD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicStore8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
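+		// Atomic 32/64-bit stores use AMOSWAP; the old value is discarded
+		// by writing it to the zero register.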
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
+ case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
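+		// AMOADD atomically adds the delta (Args[1]) to memory at Args[0]
+		// and leaves the old value in REG_TMP; the ADD below produces
+		// old+delta, the new value expected in Reg0.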
+ as := riscv.AAMOADDW
+ if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
+ as = riscv.AAMOADDD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_TMP
+ p2.Reg = v.Args[1].Reg()
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
+ // MOV ZERO, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+
+ lr := riscv.ALRW
+ sc := riscv.ASCW
+ if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
+ lr = riscv.ALRD
+ sc = riscv.ASCD
+ }
+
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+
+ p1 := s.Prog(lr)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = riscv.REG_TMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(sc)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = riscv.REG_TMP
+
+ p4 := s.Prog(riscv.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = riscv.REG_TMP
+ p4.Reg = riscv.REG_ZERO
+ p4.To.Type = obj.TYPE_BRANCH
+ gc.Patch(p4, p1)
+
+ p5 := s.Prog(riscv.AMOV)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+
+ p6 := s.Prog(obj.ANOP)
+ gc.Patch(p2, p6)
+
+ case ssa.OpRISCV64LoweredZero:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov ZERO, (Rarg0)
+ // ADD $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.ABGEU)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.Reg = v.Args[0].Reg()
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ gc.Patch(p3, p)
+
+ case ssa.OpRISCV64LoweredMove:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov (Rarg1), T2
+ // mov T2, (Rarg0)
+ // ADD $sz, Rarg0
+ // ADD $sz, Rarg1
+ // BGEU Rarg2, Rarg0, -4(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_T2
+
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_T2
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.AADD)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[0].Reg()
+
+ p4 := s.Prog(riscv.AADD)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Args[1].Reg()
+
+ p5 := s.Prog(riscv.ABGEU)
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.Reg = v.Args[1].Reg()
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ gc.Patch(p5, p)
+
+ case ssa.OpRISCV64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ // TODO: optimizations. See arm and amd64 LoweredNilCheck.
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_ZERO
+	if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpRISCV64LoweredGetClosurePtr:
+ // Closure pointer is S4 (riscv.REG_CTXT).
+ gc.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpRISCV64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpRISCV64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ default:
+ v.Fatalf("Unhandled op %v", v.Op)
+ }
+}
+
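+// blockBranch maps a conditional block kind to its RISC-V branch instruction.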
+var blockBranch = [...]obj.As{
+ ssa.BlockRISCV64BEQ: riscv.ABEQ,
+ ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
+ ssa.BlockRISCV64BGE: riscv.ABGE,
+ ssa.BlockRISCV64BGEU: riscv.ABGEU,
+ ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
+ ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
+ ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
+ ssa.BlockRISCV64BLT: riscv.ABLT,
+ ssa.BlockRISCV64BLTU: riscv.ABLTU,
+ ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
+ ssa.BlockRISCV64BNE: riscv.ABNE,
+ ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ s.SetPos(b.Pos)
+
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in A0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(riscv.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.Reg = riscv.REG_A0
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
+ ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
+ ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+
+ as := blockBranch[b.Kind]
+ invAs := riscv.InvertBranch(as)
+
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(invAs, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(as, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(as, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(invAs, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ p.From.Type = obj.TYPE_REG
+ switch b.Kind {
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+ if b.NumControls() != 2 {
+ b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ p.Reg = b.Controls[1].Reg()
+
+ case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
+ if b.NumControls() != 1 {
+ b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ }
+
+ default:
+ b.Fatalf("Unhandled block: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
new file mode 100644
index 0000000..cb68fd3
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/s390x"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &s390x.Links390x
+ arch.REGSP = s390x.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
new file mode 100644
index 0000000..5a837d8
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -0,0 +1,88 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// clearLoopCutoff is the (somewhat arbitrary) value above which it is better
+// to have a loop of clear instructions (e.g. XCs) rather than just generating
+// multiple instructions (i.e. loop unrolling).
+// Must be between 256 and 4096.
+const clearLoopCutoff = 1024
+
+// zerorange clears the stack in the given range.
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += gc.Ctxt.FixedFrameSize()
+ reg := int16(s390x.REGSP)
+
+ // If the off cannot fit in a 12-bit unsigned displacement then we
+ // need to create a copy of the stack pointer that we can adjust.
+ // We also need to do this if we are going to loop.
+ if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
+ p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+ p.Reg = int16(s390x.REGSP)
+ reg = s390x.REGRT1
+ off = 0
+ }
+
+ // Generate a loop of large clears.
+ if cnt > clearLoopCutoff {
+ ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
+ p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+ p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+ pl := p
+ p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, pl)
+ cnt = cnt % 256
+ }
+
+ // Generate remaining clear instructions without a loop.
+ for cnt > 0 {
+ n := cnt
+
+ // Can clear at most 256 bytes per instruction.
+ if n > 256 {
+ n = 256
+ }
+
+ switch n {
+ // Handle very small clears with move instructions.
+ case 8, 4, 2, 1:
+ ins := s390x.AMOVB
+ switch n {
+ case 8:
+ ins = s390x.AMOVD
+ case 4:
+ ins = s390x.AMOVW
+ case 2:
+ ins = s390x.AMOVH
+ }
+ p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+
+ // Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
+ default:
+ p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+ }
+
+ cnt -= n
+ off += n
+ }
+
+ return p
+}
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ return pp.Prog(s390x.ANOPH)
+}
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
new file mode 100644
index 0000000..8037357
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -0,0 +1,988 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.OpS390XMOVDconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch width {
+ case 1:
+ return s390x.AMOVB
+ case 2:
+ return s390x.AMOVH
+ case 4:
+ return s390x.AMOVW
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ return s390x.AFMOVD
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+	panic("bad move type")
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// opregregimm emits instructions for
+// dest := src(From) op off
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = off
+ p.Reg = src
+ p.To.Reg = dest
+ p.To.Type = obj.TYPE_REG
+ return p
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpS390XSLD, ssa.OpS390XSLW,
+ ssa.OpS390XSRD, ssa.OpS390XSRW,
+ ssa.OpS390XSRAD, ssa.OpS390XSRAW,
+ ssa.OpS390XRLLG, ssa.OpS390XRLL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ if r2 == s390x.REG_R0 {
+ v.Fatalf("cannot use R0 as shift value %s", v.LongString())
+ }
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XRXSBG:
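+		// The rotate-then-xor-selected-bits instruction takes its start bit,
+		// end bit and rotate amount from the RotateParams aux value.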
+ r1 := v.Reg()
+ if r1 != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ r2 := v.Args[1].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.SetRestArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ case ssa.OpS390XRISBGZ:
+ r1 := v.Reg()
+ r2 := v.Args[0].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.SetRestArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ case ssa.OpS390XADD, ssa.OpS390XADDW,
+ ssa.OpS390XSUB, ssa.OpS390XSUBW,
+ ssa.OpS390XAND, ssa.OpS390XANDW,
+ ssa.OpS390XOR, ssa.OpS390XORW,
+ ssa.OpS390XXOR, ssa.OpS390XXORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XADDC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ if r1 == r2 {
+ r2, r3 = r3, r2
+ }
+ p := opregreg(s, v.Op.Asm(), r1, r2)
+ if r3 != r1 {
+ p.Reg = r3
+ }
+ case ssa.OpS390XSUBC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r1, r3)
+ if r1 != r2 {
+ p.Reg = r2
+ }
+ case ssa.OpS390XADDE, ssa.OpS390XSUBE:
+ r1 := v.Reg0()
+ if r1 != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ r2 := v.Args[1].Reg()
+ opregreg(s, v.Op.Asm(), r1, r2)
+ case ssa.OpS390XADDCconst:
+ r1 := v.Reg0()
+ r3 := v.Args[0].Reg()
+ i2 := int64(int16(v.AuxInt))
+ opregregimm(s, v.Op.Asm(), r1, r3, i2)
+ // 2-address opcode arithmetic
+ case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
+ ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
+ ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
+ case ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
+ ssa.OpS390XFADDS, ssa.OpS390XFADD:
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
+ case ssa.OpS390XMLGR:
+ // MLGR Rx R3 -> R2:R3
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ if r1 != s390x.REG_R3 {
+ v.Fatalf("We require the multiplicand to be stored in R3 for MLGR %s", v.LongString())
+ }
+ p := s.Prog(s390x.AMLGR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Reg = s390x.REG_R2
+ p.To.Type = obj.TYPE_REG
+ case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
+ ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XFIDBR:
+ switch v.AuxInt {
+ case 0, 1, 3, 4, 5, 6, 7:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ default:
+ v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
+ }
+ case ssa.OpS390XCPSDR:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
+ ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
+ ssa.OpS390XMODD, ssa.OpS390XMODW,
+ ssa.OpS390XMODDU, ssa.OpS390XMODWU:
+
+ // TODO(mundaym): use the temp registers every time like x86 does with AX?
+ dividend := v.Args[0].Reg()
+ divisor := v.Args[1].Reg()
+
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
+ v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
+
+ var c *obj.Prog
+ c = s.Prog(s390x.ACMP)
+ j = s.Prog(s390x.ABEQ)
+
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = divisor
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = divisor
+ p.Reg = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dividend
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(s390x.ABR)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
+ // n * -1 = -n
+ n = s.Prog(s390x.ANEG)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ } else {
+ // n % -1 == 0
+ n = s.Prog(s390x.AXOR)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = dividend
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+ case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
+ ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
+ ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
+ ssa.OpS390XORconst, ssa.OpS390XORWconst,
+ ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
+ ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
+ ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
+ ssa.OpS390XRLLconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ if r != r1 {
+ p.Reg = r1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XMOVDaddridx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Scale = 1
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = r
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVDaddr:
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(uint32(v.AuxInt))
+ case ssa.OpS390XMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XADDWload, ssa.OpS390XADDload,
+ ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
+ ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
+ ssa.OpS390XANDWload, ssa.OpS390XANDload,
+ ssa.OpS390XORWload, ssa.OpS390XORload,
+ ssa.OpS390XXORWload, ssa.OpS390XXORload:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XMOVDload,
+ ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
+ ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
+ ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
+ ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
+ ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
+ ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
+ ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Scale = 1
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
+ ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
+ ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
+ ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
+ ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Scale = 1
+ p.To.Index = i
+ gc.AddAux(&p.To, v)
+ case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
+ ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
+ ssa.OpS390XLDGR, ssa.OpS390XLGDR,
+ ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
+ ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
+ ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
+ ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
+ ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
+ ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
+ ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCLEAR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.OpCopy:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.OpS390XLoweredGetClosurePtr:
+ // Closure pointer is already in R12.
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
+ // input is already rounded
+ case ssa.OpS390XLoweredGetG:
+ r := v.Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s390x.REGG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
+ s.Call(v)
+ case ssa.OpS390XLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
+ ssa.OpS390XNEG, ssa.OpS390XNEGW,
+ ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XNOT, ssa.OpS390XNOTW:
+ v.Fatalf("NOT/NOTW generated %s", v.LongString())
+ case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8:
+ v.Fatalf("SumBytes generated %s", v.LongString())
+ case ssa.OpS390XLOCGR:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(v.Aux.(s390x.CCMask))
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XFSQRT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLTDBR, ssa.OpS390XLTEBR:
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT, ssa.OpS390XFlagOV:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XLoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ p := s.Prog(s390x.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpS390XMVC:
+ vo := v.AuxValAndOff()
+ p := s.Prog(s390x.AMVC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = vo.Val()
+ p.SetFrom3(obj.Addr{
+ Type: obj.TYPE_MEM,
+ Reg: v.Args[1].Reg(),
+ Offset: vo.Off(),
+ })
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = vo.Off()
+ case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
+ ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
+ for i := 2; i < len(v.Args)-1; i++ {
+ if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
+ v.Fatalf("invalid store multiple %s", v.LongString())
+ }
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[len(v.Args)-2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredMove:
+ // Inputs must be valid pointers to memory,
+ // so adjust arg0 and arg1 as part of the expansion.
+ // arg2 should be src+size,
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+ // CMPU R2, Rarg2
+ // BLT mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ // arg2 is the last address to move in the loop + 256
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = 256
+ mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+
+ for i := 0; i < 2; i++ {
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[i].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[i].Reg()
+ }
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[1].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[2].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ gc.Patch(bne, mvc)
+
+ if v.AuxInt > 0 {
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = v.AuxInt
+ mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XLoweredZero:
+ // Input must be a valid pointer to memory,
+ // so adjust arg0 as part of the expansion.
+ // arg1 should be src+size,
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+ // CMPU R1, Rarg1
+ // BLT clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ // arg1 is the last address to zero in the loop + 256
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = 256
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[0].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[0].Reg()
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[0].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[1].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ gc.Patch(bne, clear)
+
+ if v.AuxInt > 0 {
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = v.AuxInt
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XMOVBZatomicload, ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpS390XLAN, ssa.OpS390XLAO:
+ // LA(N|O) Ry, TMP, 0(Rx)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = s390x.REGTMP
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = v.Args[0].Reg()
+ case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
+ r := v.Args[0].Reg() // clobbered, assumed R1 in comments
+
+ // Round ptr down to nearest multiple of 4.
+ // ANDW $~3, R1
+ ptr := s.Prog(s390x.AANDW)
+ ptr.From.Type = obj.TYPE_CONST
+ ptr.From.Offset = 0xfffffffc
+ ptr.To.Type = obj.TYPE_REG
+ ptr.To.Reg = r
+
+ // Redirect output of LA(N|O) into R1 since it is clobbered anyway.
+ // LA(N|O) Rx, R1, 0(R1)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = r
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = r
+ case ssa.OpS390XLAA, ssa.OpS390XLAAG:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Reg0()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
+ // Convert the flags output of CS{,G} into a bool.
+ // CS{,G} arg1, arg2, arg0
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // NOP (so the BNE has somewhere to land)
+
+ // CS{,G} arg1, arg2, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Args[1].Reg() // old
+ cs.Reg = v.Args[2].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&cs.To, v)
+
+ // MOVD $0, ret
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 0
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // BNE 2(PC)
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+
+ // MOVD $1, ret
+ movd = s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 1
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ gc.Patch(bne, nop)
+ case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
+ // Loop until the CS{,G} succeeds.
+ // MOV{WZ,D} arg0, ret
+ // cs: CS{,G} ret, arg1, arg0
+ // BNE cs
+
+ // MOV{WZ,D} arg0, ret
+ load := s.Prog(loadByType(v.Type.FieldType(0)))
+ load.From.Type = obj.TYPE_MEM
+ load.From.Reg = v.Args[0].Reg()
+ load.To.Type = obj.TYPE_REG
+ load.To.Reg = v.Reg0()
+ gc.AddAux(&load.From, v)
+
+ // CS{,G} ret, arg1, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Reg0() // old
+ cs.Reg = v.Args[1].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&cs.To, v)
+
+ // BNE cs
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+ gc.Patch(bne, cs)
+ case ssa.OpS390XSYNC:
+ s.Prog(s390x.ASYNC)
+ case ssa.OpClobber:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+func blockAsm(b *ssa.Block) obj.As {
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ return s390x.ABRC
+ case ssa.BlockS390XCRJ:
+ return s390x.ACRJ
+ case ssa.BlockS390XCGRJ:
+ return s390x.ACGRJ
+ case ssa.BlockS390XCLRJ:
+ return s390x.ACLRJ
+ case ssa.BlockS390XCLGRJ:
+ return s390x.ACLGRJ
+ case ssa.BlockS390XCIJ:
+ return s390x.ACIJ
+ case ssa.BlockS390XCGIJ:
+ return s390x.ACGIJ
+ case ssa.BlockS390XCLIJ:
+ return s390x.ACLIJ
+ case ssa.BlockS390XCLGIJ:
+ return s390x.ACLGIJ
+ }
+ b.Fatalf("blockAsm not implemented: %s", b.LongString())
+ panic("unreachable")
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ // Handle generic blocks first.
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(s390x.ABR)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ return
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Br(s390x.ACIJ, b.Succs[1].Block())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
+ p.Reg = s390x.REG_R3
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: 0})
+ if b.Succs[0].Block() != next {
+ s.Br(s390x.ABR, b.Succs[0].Block())
+ }
+ return
+ case ssa.BlockExit:
+ return
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ return
+ case ssa.BlockRetJmp:
+ p := s.Prog(s390x.ABR)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+ return
+ }
+
+ // Handle s390x-specific blocks. These blocks all have a
+ // condition code mask in the Aux value and 2 successors.
+ succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()}
+ mask := b.Aux.(s390x.CCMask)
+
+ // TODO: take into account Likely property for forward/backward
+ // branches. We currently can't do this because we don't know
+ // whether a block has already been emitted. In general forward
+ // branches are assumed 'not taken' and backward branches are
+ // assumed 'taken'.
+ if next == succs[0] {
+ succs[0], succs[1] = succs[1], succs[0]
+ mask = mask.Inverse()
+ }
+
+ p := s.Br(blockAsm(b), succs[0])
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask)
+ case ssa.BlockS390XCGRJ, ssa.BlockS390XCRJ,
+ ssa.BlockS390XCLGRJ, ssa.BlockS390XCLRJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: b.Controls[1].Reg()})
+ case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(int8(b.AuxInt))})
+ case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(uint8(b.AuxInt))})
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+ if next != succs[1] {
+ s.Br(s390x.ABR, succs[1])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/README.md b/src/cmd/compile/internal/ssa/README.md
new file mode 100644
index 0000000..4483c2c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/README.md
@@ -0,0 +1,209 @@
+<!---
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+-->
+
+## Introduction to the Go compiler's SSA backend
+
+This package contains the compiler's Static Single Assignment form component. If
+you're not familiar with SSA, its [Wikipedia
+article](https://en.wikipedia.org/wiki/Static_single_assignment_form) is a good
+starting point.
+
+It is recommended that you first read [cmd/compile/README.md](../../README.md)
+if you are not familiar with the Go compiler already. That document gives an
+overview of the compiler and explains the role SSA plays within it.
+
+### Key concepts
+
+The names described below may be loosely related to their Go counterparts, but
+note that they are not equivalent. For example, a Go block statement has a
+variable scope, yet SSA has no notion of variables nor variable scopes.
+
+It may also be surprising that values and blocks are named after their unique
+sequential IDs. They rarely correspond to named entities in the original code,
+such as variables or function parameters. The sequential IDs also allow the
+compiler to avoid maps, and it is always possible to trace the values back to Go
+code using debug and position information.
+
+#### Values
+
+Values are the basic building blocks of SSA. Per SSA's very definition, a
+value is defined exactly once, but it may be used any number of times. A value
+mainly consists of a unique identifier, an operator, a type, and some arguments.
+
+An operator or `Op` describes the operation that computes the value. The
+semantics of each operator can be found in `gen/*Ops.go`. For example, `OpAdd8`
+takes two value arguments holding 8-bit integers and results in their addition.
+Here is a possible SSA representation of the addition of two `uint8` values:
+
+ // var c uint8 = a + b
+ v4 = Add8 <uint8> v2 v3
+
+A value's type will usually be a Go type. For example, the value in the example
+above has a `uint8` type, and a constant boolean value will have a `bool` type.
+However, certain types don't come from Go and are special; below we will cover
+`memory`, the most common of them.
+
+See [value.go](value.go) for more information.
+
+#### Memory types
+
+`memory` represents the global memory state. An `Op` that takes a memory
+argument depends on that memory state, and an `Op` which has the memory type
+impacts the state of memory. This ensures that memory operations are kept in the
+right order. For example:
+
+ // *a = 3
+ // *b = *a
+ v10 = Store <mem> {int} v6 v8 v1
+ v14 = Store <mem> {int} v7 v8 v10
+
+Here, `Store` stores its second argument (of type `int`) into the first argument
+(of type `*int`). The last argument is the memory state; since the second store
+depends on the memory value defined by the first store, the two stores cannot be
+reordered.
+
+See [cmd/compile/internal/types/type.go](../types/type.go) for more information.
+
+#### Blocks
+
+A block represents a basic block in the control flow graph of a function. It is,
+essentially, a list of values that define the operation of this block. Besides
+the list of values, blocks mainly consist of a unique identifier, a kind, and a
+list of successor blocks.
+
+The simplest kind is a `plain` block; it simply hands the control flow to
+another block, thus its successors list contains one block.
+
+Another common block kind is the `exit` block. These have a final value, called
+the control value, which must return a memory state. This is necessary for
+functions to return some values; for example, the caller needs some memory state
+to depend on to ensure that it receives those return values correctly.
+
+The last important block kind we will mention is the `if` block. It has a single
+control value that must be a boolean value, and it has exactly two successor
+blocks. The control flow is handed to the first successor if the bool is true,
+and to the second otherwise.
+
+Here is a sample if-else control flow represented with basic blocks:
+
+ // func(b bool) int {
+ // if b {
+ // return 2
+ // }
+ // return 3
+ // }
+ b1:
+ v1 = InitMem <mem>
+ v2 = SP <uintptr>
+ v5 = Addr <*int> {~r1} v2
+ v6 = Arg <bool> {b}
+ v8 = Const64 <int> [2]
+ v12 = Const64 <int> [3]
+ If v6 -> b2 b3
+ b2: <- b1
+ v10 = VarDef <mem> {~r1} v1
+ v11 = Store <mem> {int} v5 v8 v10
+ Ret v11
+ b3: <- b1
+ v14 = VarDef <mem> {~r1} v1
+ v15 = Store <mem> {int} v5 v12 v14
+ Ret v15
+
+<!---
+TODO: can we come up with a shorter example that still shows the control flow?
+-->
+
+See [block.go](block.go) for more information.
+
+#### Functions
+
+A function represents a function declaration along with its body. It mainly
+consists of a name, a type (its signature), a list of blocks that form its body,
+and the entry block within said list.
+
+When a function is called, the control flow is handed to its entry block. If the
+function terminates, the control flow will eventually reach an exit block, thus
+ending the function call.
+
+Note that a function may have zero or multiple exit blocks, just like a Go
+function can have any number of return points, but it must have exactly one
+entry point block.
+
+Also note that some SSA functions are autogenerated, such as the hash functions
+for each type used as a map key.
+
+For example, this is what an empty function can look like in SSA, with a single
+exit block that returns an uninteresting memory state:
+
+ foo func()
+ b1:
+ v1 = InitMem <mem>
+ Ret v1
+
+See [func.go](func.go) for more information.
+
+### Compiler passes
+
+Having a program in SSA form is not very useful on its own. Its advantage lies
+in how easy it is to write optimizations that modify the program to make it
+better. The way the Go compiler accomplishes this is via a list of passes.
+
+Each pass transforms an SSA function in some way. For example, a dead code
+elimination pass will remove blocks and values that it can prove will never be
+executed, and a nil check elimination pass will remove nil checks which it can
+prove to be redundant.
+
+Compiler passes work on one function at a time, and by default run sequentially
+and exactly once.
+
+The `lower` pass is special; it converts the SSA representation from being
+machine-independent to being machine-dependent. That is, some abstract operators
+are replaced with their non-generic counterparts, potentially reducing or
+increasing the final number of values.
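+
+For instance, on amd64 the `lower` pass replaces a generic 64-bit addition with
+its machine-specific counterpart; roughly (an illustrative sketch, the exact
+operator names depend on the target architecture):
+
+    v4 = Add64 <int> v2 v3    // before lower
+    v4 = ADDQ <int> v2 v3     // after lower (amd64)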
+
+<!---
+TODO: Probably explain here why the ordering of the passes matters, and why some
+passes like deadstore have multiple variants at different stages.
+-->
+
+See the `passes` list defined in [compile.go](compile.go) for more information.
+
+### Playing with SSA
+
+A good way to see and get used to the compiler's SSA in action is via
+`GOSSAFUNC`. For example, to see func `Foo`'s initial SSA form and final
+generated assembly, one can run:
+
+ GOSSAFUNC=Foo go build
+
+The generated `ssa.html` file will also contain the SSA func after each of the
+compiler passes, making it easy to see what each pass does to a particular
+program. You can also click on values and blocks to highlight them, to help
+follow the control flow and values.
+
+<!---
+TODO: need more ideas for this section
+-->
+
+### Hacking on SSA
+
+While most compiler passes are implemented directly in Go code, some others are
+code generated. This is currently done via rewrite rules, which have their own
+syntax and are maintained in `gen/*.rules`. Simpler optimizations can be written
+easily and quickly this way, but rewrite rules are not suitable for more complex
+optimizations.
+
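+For illustration, a simple constant-folding rule in `gen/generic.rules` looks
+roughly like this (a sketch; the exact arrow syntax has varied across compiler
+versions):
+
+    (Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
+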
+To read more on rewrite rules, have a look at the top comments in
+[gen/generic.rules](gen/generic.rules) and [gen/rulegen.go](gen/rulegen.go).
+
+Similarly, the code to manage operators is also code generated from
+`gen/*Ops.go`, as it is easier to maintain a few tables than a lot of code.
+After changing the rules or operators, see [gen/README](gen/README) for
+instructions on how to generate the Go code again.
+
+<!---
+TODO: more tips and info could likely go here
+-->
diff --git a/src/cmd/compile/internal/ssa/TODO b/src/cmd/compile/internal/ssa/TODO
new file mode 100644
index 0000000..f4e4382
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/TODO
@@ -0,0 +1,24 @@
+This is a list of possible improvements to the SSA pass of the compiler.
+
+Optimizations (better compiled code)
+------------------------------------
+- Reduce register pressure in scheduler
+- Make dead store pass inter-block
+- If there are a lot of MOVQ $0, ..., then load
+ 0 into a register and use the register as the source instead.
+- Allow large structs to be SSAable (issue 24416)
+- Allow arrays of length >1 to be SSAable
+- If strings are being passed around without being interpreted (i.e., without
+ their ptr and len fields being accessed), pass them in xmm registers?
+ Same for interfaces?
+- any pointer generated by unsafe arithmetic must be non-nil?
+ (Of course that may not be true in general, but it is for all uses
+ in the runtime, and we can play games with unsafe.)
+
+Optimizations (better compiler)
+-------------------------------
+- Handle signed division overflow and sign extension earlier
+
+Regalloc
+--------
+- Make liveness analysis non-quadratic
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
new file mode 100644
index 0000000..1baf143
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -0,0 +1,460 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// addressingModes combines address calculations into memory operations
+// that can perform complicated addressing modes.
+func addressingModes(f *Func) {
+ isInImmediateRange := is32Bit
+ switch f.Config.arch {
+ default:
+ // Most architectures can't do this.
+ return
+ case "amd64", "386":
+ case "s390x":
+ isInImmediateRange = is20Bit
+ }
+
+ var tmp []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !combineFirst[v.Op] {
+ continue
+ }
+ // All matched operations have the pointer in arg[0].
+ // All results have the pointer in arg[0] and the index in arg[1].
+ // *Except* for operations which update a register,
+ // which are marked with resultInArg0. Those have
+ // the pointer in arg[1], and the corresponding result op
+ // has the pointer in arg[1] and the index in arg[2].
+ ptrIndex := 0
+ if opcodeTable[v.Op].resultInArg0 {
+ ptrIndex = 1
+ }
+ p := v.Args[ptrIndex]
+ c, ok := combine[[2]Op{v.Op, p.Op}]
+ if !ok {
+ continue
+ }
+ // See if we can combine the Aux/AuxInt values.
+ switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} {
+ case [2]auxType{auxSymOff, auxInt32}:
+ // TODO: introduce auxSymOff32
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymOff, auxSymOff}:
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymValAndOff, auxInt32}:
+ vo := ValAndOff(v.AuxInt)
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymValAndOff, auxSymOff}:
+ vo := ValAndOff(v.AuxInt)
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymOff, auxNone}:
+ // nothing to do
+ case [2]auxType{auxSymValAndOff, auxNone}:
+ // nothing to do
+ default:
+ f.Fatalf("unknown aux combining for %s and %s\n", v.Op, p.Op)
+ }
+ // Combine the operations.
+ tmp = append(tmp[:0], v.Args[:ptrIndex]...)
+ tmp = append(tmp, p.Args...)
+ tmp = append(tmp, v.Args[ptrIndex+1:]...)
+ v.resetArgs()
+ v.Op = c
+ v.AddArgs(tmp...)
+ if needSplit[c] {
+ // It turns out that some of the combined instructions have faster two-instruction equivalents,
+ // but not the two instructions that led to them being combined here. For example
+ // (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+ // The final pair of instructions turns out to be notably faster, at least in some benchmarks.
+ f.Config.splitLoad(v)
+ }
+ }
+ }
+}
+
+// combineFirst contains ops which appear in combine as the
+// first part of the key.
+var combineFirst = map[Op]bool{}
+
+func init() {
+ for k := range combine {
+ combineFirst[k[0]] = true
+ }
+}
+
+// needSplit contains instructions that should be postprocessed by splitLoad
+// into a more-efficient two-instruction form.
+var needSplit = map[Op]bool{
+ OpAMD64CMPBloadidx1: true,
+ OpAMD64CMPWloadidx1: true,
+ OpAMD64CMPLloadidx1: true,
+ OpAMD64CMPQloadidx1: true,
+ OpAMD64CMPWloadidx2: true,
+ OpAMD64CMPLloadidx4: true,
+ OpAMD64CMPQloadidx8: true,
+
+ OpAMD64CMPBconstloadidx1: true,
+ OpAMD64CMPWconstloadidx1: true,
+ OpAMD64CMPLconstloadidx1: true,
+ OpAMD64CMPQconstloadidx1: true,
+ OpAMD64CMPWconstloadidx2: true,
+ OpAMD64CMPLconstloadidx4: true,
+ OpAMD64CMPQconstloadidx8: true,
+}
+
+// For each entry k, v in this map, if we have a value x with:
+// x.Op == k[0]
+// x.Args[0].Op == k[1]
+// then we can set x.Op to v and set x.Args like this:
+// x.Args[0].Args + x.Args[1:]
+// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
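+//
+// For example (an illustrative case using the first amd64 entry below):
+//   x = (MOVBload [off] {sym} (ADDQ ptr idx) mem)
+// can be rewritten to
+//   x = (MOVBloadidx1 [off] {sym} ptr idx mem)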
+var combine = map[[2]Op]Op{
+ // amd64
+ [2]Op{OpAMD64MOVBload, OpAMD64ADDQ}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64ADDQ}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64ADDQ}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64ADDQ}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64ADDQ}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64ADDQ}: OpAMD64MOVSDloadidx1,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64ADDQ}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64ADDQ}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64ADDQ}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64ADDQ}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64ADDQ}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64ADDQ}: OpAMD64MOVSDstoreidx1,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64ADDQ}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64ADDQ}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64ADDQ}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64ADDQ}: OpAMD64MOVQstoreconstidx1,
+
+ [2]Op{OpAMD64MOVBload, OpAMD64LEAQ1}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ1}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ2}: OpAMD64MOVWloadidx2,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ1}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ4}: OpAMD64MOVLloadidx4,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ8}: OpAMD64MOVLloadidx8,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ1}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ8}: OpAMD64MOVQloadidx8,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ1}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ4}: OpAMD64MOVSSloadidx4,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ1}: OpAMD64MOVSDloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ8}: OpAMD64MOVSDloadidx8,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64LEAQ1}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ1}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ2}: OpAMD64MOVWstoreidx2,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ1}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ4}: OpAMD64MOVLstoreidx4,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ8}: OpAMD64MOVLstoreidx8,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ1}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ8}: OpAMD64MOVQstoreidx8,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ1}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ4}: OpAMD64MOVSSstoreidx4,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ1}: OpAMD64MOVSDstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ8}: OpAMD64MOVSDstoreidx8,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64LEAQ1}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ1}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ2}: OpAMD64MOVWstoreconstidx2,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ1}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ4}: OpAMD64MOVLstoreconstidx4,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
+
+ // These instructions are re-split differently for performance, see needSplit above.
+ // TODO if 386 versions are created, also update needSplit and gen/386splitload.rules
+ [2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64ADDQ}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64ADDQ}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64ADDQ}: OpAMD64CMPQloadidx1,
+
+ [2]Op{OpAMD64CMPBload, OpAMD64LEAQ1}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ1}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ2}: OpAMD64CMPWloadidx2,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ1}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ4}: OpAMD64CMPLloadidx4,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ1}: OpAMD64CMPQloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ8}: OpAMD64CMPQloadidx8,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64ADDQ}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64ADDQ}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64ADDQ}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64ADDQ}: OpAMD64CMPQconstloadidx1,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64LEAQ1}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ1}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ2}: OpAMD64CMPWconstloadidx2,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ1}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ4}: OpAMD64CMPLconstloadidx4,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ1}: OpAMD64CMPQconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ8}: OpAMD64CMPQconstloadidx8,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64ADDQ}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64ADDQ}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64ADDQ}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64ADDQ}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64ADDQ}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64ADDQ}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64ADDQ}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64ADDQ}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64ADDQ}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64ADDQ}: OpAMD64XORQloadidx1,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ1}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ4}: OpAMD64ADDLloadidx4,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ8}: OpAMD64ADDLloadidx8,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ1}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ8}: OpAMD64ADDQloadidx8,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ1}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ4}: OpAMD64SUBLloadidx4,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ8}: OpAMD64SUBLloadidx8,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ1}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ8}: OpAMD64SUBQloadidx8,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ1}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ4}: OpAMD64ANDLloadidx4,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ8}: OpAMD64ANDLloadidx8,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ1}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ8}: OpAMD64ANDQloadidx8,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ1}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ4}: OpAMD64ORLloadidx4,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ8}: OpAMD64ORLloadidx8,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ1}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ8}: OpAMD64ORQloadidx8,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ1}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ4}: OpAMD64XORLloadidx4,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ8}: OpAMD64XORLloadidx8,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ1}: OpAMD64XORQloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ8}: OpAMD64XORQloadidx8,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64ADDQ}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64ADDQ}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64ADDQ}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64ADDQ}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64ADDQ}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64ADDQ}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64ADDQ}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64ADDQ}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64ADDQ}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64ADDQ}: OpAMD64XORQmodifyidx1,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ1}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ4}: OpAMD64ADDLmodifyidx4,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ8}: OpAMD64ADDLmodifyidx8,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ1}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ8}: OpAMD64ADDQmodifyidx8,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ1}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ4}: OpAMD64SUBLmodifyidx4,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ8}: OpAMD64SUBLmodifyidx8,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ1}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ8}: OpAMD64SUBQmodifyidx8,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ1}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ4}: OpAMD64ANDLmodifyidx4,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ8}: OpAMD64ANDLmodifyidx8,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ1}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ8}: OpAMD64ANDQmodifyidx8,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ1}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ4}: OpAMD64ORLmodifyidx4,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ8}: OpAMD64ORLmodifyidx8,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ1}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ8}: OpAMD64ORQmodifyidx8,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ1}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ4}: OpAMD64XORLmodifyidx4,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ8}: OpAMD64XORLmodifyidx8,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ1}: OpAMD64XORQmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ8}: OpAMD64XORQmodifyidx8,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64ADDQ}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64ADDQ}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64ADDQ}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64ADDQ}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64ADDQ}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64ADDQ}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64ADDQ}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64ADDQ}: OpAMD64XORQconstmodifyidx1,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ1}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ4}: OpAMD64ADDLconstmodifyidx4,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ8}: OpAMD64ADDLconstmodifyidx8,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ1}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ8}: OpAMD64ADDQconstmodifyidx8,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ1}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ4}: OpAMD64ANDLconstmodifyidx4,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ8}: OpAMD64ANDLconstmodifyidx8,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ1}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ8}: OpAMD64ANDQconstmodifyidx8,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ1}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ4}: OpAMD64ORLconstmodifyidx4,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ8}: OpAMD64ORLconstmodifyidx8,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ1}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ8}: OpAMD64ORQconstmodifyidx8,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ1}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ4}: OpAMD64XORLconstmodifyidx4,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ8}: OpAMD64XORLconstmodifyidx8,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ1}: OpAMD64XORQconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ8}: OpAMD64XORQconstmodifyidx8,
+
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ1}: OpAMD64ADDSSloadidx1,
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ4}: OpAMD64ADDSSloadidx4,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ1}: OpAMD64ADDSDloadidx1,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ8}: OpAMD64ADDSDloadidx8,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ1}: OpAMD64SUBSSloadidx1,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ4}: OpAMD64SUBSSloadidx4,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ1}: OpAMD64SUBSDloadidx1,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ8}: OpAMD64SUBSDloadidx8,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ1}: OpAMD64MULSSloadidx1,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ4}: OpAMD64MULSSloadidx4,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ1}: OpAMD64MULSDloadidx1,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ8}: OpAMD64MULSDloadidx8,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ1}: OpAMD64DIVSSloadidx1,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ4}: OpAMD64DIVSSloadidx4,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8,
+
+ // 386
+ [2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVLload, Op386ADDL}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVSSload, Op386ADDL}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSDload, Op386ADDL}: Op386MOVSDloadidx1,
+
+ [2]Op{Op386MOVBstore, Op386ADDL}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386ADDL}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVLstore, Op386ADDL}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386ADDL}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386ADDL}: Op386MOVSDstoreidx1,
+
+ [2]Op{Op386MOVBstoreconst, Op386ADDL}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386ADDL}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386ADDL}: Op386MOVLstoreconstidx1,
+
+ [2]Op{Op386MOVBload, Op386LEAL1}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL1}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL2}: Op386MOVWloadidx2,
+ [2]Op{Op386MOVLload, Op386LEAL1}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVLload, Op386LEAL4}: Op386MOVLloadidx4,
+ [2]Op{Op386MOVSSload, Op386LEAL1}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSSload, Op386LEAL4}: Op386MOVSSloadidx4,
+ [2]Op{Op386MOVSDload, Op386LEAL1}: Op386MOVSDloadidx1,
+ [2]Op{Op386MOVSDload, Op386LEAL8}: Op386MOVSDloadidx8,
+
+ [2]Op{Op386MOVBstore, Op386LEAL1}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL1}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL2}: Op386MOVWstoreidx2,
+ [2]Op{Op386MOVLstore, Op386LEAL1}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVLstore, Op386LEAL4}: Op386MOVLstoreidx4,
+ [2]Op{Op386MOVSSstore, Op386LEAL1}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386LEAL4}: Op386MOVSSstoreidx4,
+ [2]Op{Op386MOVSDstore, Op386LEAL1}: Op386MOVSDstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386LEAL8}: Op386MOVSDstoreidx8,
+
+ [2]Op{Op386MOVBstoreconst, Op386LEAL1}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL1}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL2}: Op386MOVWstoreconstidx2,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL1}: Op386MOVLstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL4}: Op386MOVLstoreconstidx4,
+
+ [2]Op{Op386ADDLload, Op386LEAL4}: Op386ADDLloadidx4,
+ [2]Op{Op386SUBLload, Op386LEAL4}: Op386SUBLloadidx4,
+ [2]Op{Op386MULLload, Op386LEAL4}: Op386MULLloadidx4,
+ [2]Op{Op386ANDLload, Op386LEAL4}: Op386ANDLloadidx4,
+ [2]Op{Op386ORLload, Op386LEAL4}: Op386ORLloadidx4,
+ [2]Op{Op386XORLload, Op386LEAL4}: Op386XORLloadidx4,
+
+ [2]Op{Op386ADDLmodify, Op386LEAL4}: Op386ADDLmodifyidx4,
+ [2]Op{Op386SUBLmodify, Op386LEAL4}: Op386SUBLmodifyidx4,
+ [2]Op{Op386ANDLmodify, Op386LEAL4}: Op386ANDLmodifyidx4,
+ [2]Op{Op386ORLmodify, Op386LEAL4}: Op386ORLmodifyidx4,
+ [2]Op{Op386XORLmodify, Op386LEAL4}: Op386XORLmodifyidx4,
+
+ [2]Op{Op386ADDLconstmodify, Op386LEAL4}: Op386ADDLconstmodifyidx4,
+ [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
+ [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
+ [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
+
+ // s390x
+ [2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx,
+
+ [2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx,
+}
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
new file mode 100644
index 0000000..0d35154
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "math"
+)
+
+// A biasedSparseMap is a sparseMap for integers between J and K inclusive,
+// where J might be somewhat larger than zero (and K-J is probably much smaller than J).
+// (The motivating use case is the line numbers of statements for a single function.)
+// Not all features of a SparseMap are exported, and it is also easy to treat a
+// biasedSparseMap like a SparseSet.
+type biasedSparseMap struct {
+ s *sparseMap
+ first int
+}
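+
+// A typical use looks roughly like this (illustrative sketch only):
+//
+//   m := newBiasedSparseMap(1000, 1100) // keys 1000 through 1100
+//   m.add(1042)                         // record key 1042 with value 0
+//   _ = m.contains(1042)                // true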
+
+// newBiasedSparseMap returns a new biasedSparseMap for values between first and last, inclusive.
+func newBiasedSparseMap(first, last int) *biasedSparseMap {
+ if first > last {
+ return &biasedSparseMap{first: math.MaxInt32, s: nil}
+ }
+ return &biasedSparseMap{first: first, s: newSparseMap(1 + last - first)}
+}
+
+// cap returns one more than the largest key valid for s
+func (s *biasedSparseMap) cap() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.cap() + int(s.first)
+}
+
+// size returns the number of entries stored in s
+func (s *biasedSparseMap) size() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.size()
+}
+
+// contains reports whether x is a key in s
+func (s *biasedSparseMap) contains(x uint) bool {
+ if s == nil || s.s == nil {
+ return false
+ }
+ if int(x) < s.first {
+ return false
+ }
+ if int(x) >= s.cap() {
+ return false
+ }
+ return s.s.contains(ID(int(x) - s.first))
+}
+
+// get returns the value s maps for key x, or -1 if
+// x is not mapped or is out of range for s.
+func (s *biasedSparseMap) get(x uint) int32 {
+ if s == nil || s.s == nil {
+ return -1
+ }
+ if int(x) < s.first {
+ return -1
+ }
+ if int(x) >= s.cap() {
+ return -1
+ }
+ return s.s.get(ID(int(x) - s.first))
+}
+
+// getEntry returns the i'th key and value stored in s,
+// where 0 <= i < s.size()
+func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) {
+ e := s.s.contents()[i]
+ x = uint(int(e.key) + s.first)
+ v = e.val
+ return
+}
+
+// add inserts x->0 into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) add(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), 0, src.NoXPos)
+}
+
+// set inserts x->v into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) set(x uint, v int32) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), v, src.NoXPos)
+}
+
+// remove removes key x from s.
+func (s *biasedSparseMap) remove(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.remove(ID(int(x) - s.first))
+}
+
+func (s *biasedSparseMap) clear() {
+ if s.s != nil {
+ s.s.clear()
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
new file mode 100644
index 0000000..519ac21
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -0,0 +1,371 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Block represents a basic block in the control flow graph of a function.
+type Block struct {
+ // A unique identifier for the block. The system will attempt to allocate
+ // these IDs densely, but no guarantees.
+ ID ID
+
+ // Source position for block's control operation
+ Pos src.XPos
+
+ // The kind of block this is.
+ Kind BlockKind
+
+ // Likely direction for branches.
+ // If BranchLikely, Succs[0] is the most likely branch taken.
+ // If BranchUnlikely, Succs[1] is the most likely branch taken.
+ // Ignored if len(Succs) < 2.
+ // Fatal if not BranchUnknown and len(Succs) > 2.
+ Likely BranchPrediction
+
+ // After flagalloc, records whether flags are live at the end of the block.
+ FlagsLiveAtEnd bool
+
+ // Subsequent blocks, if any. The number and order depend on the block kind.
+ Succs []Edge
+
+ // Inverse of successors.
+ // The order is significant to Phi nodes in the block.
+ // TODO: predecessors is a pain to maintain. Can we somehow order phi
+ // arguments by block id and have this field computed explicitly when needed?
+ Preds []Edge
+
+ // A list of values that determine how the block is exited. The number
+ // and type of control values depends on the Kind of the block. For
+ // instance, a BlockIf has a single boolean control value and BlockExit
+ // has a single memory control value.
+ //
+ // The ControlValues() method may be used to get a slice with the non-nil
+ // control values that can be ranged over.
+ //
+ // Controls[1] must be nil if Controls[0] is nil.
+ Controls [2]*Value
+
+ // Auxiliary info for the block. Its value depends on the Kind.
+ Aux interface{}
+ AuxInt int64
+
+ // The unordered set of Values that define the operation of this block.
+ // After the scheduling pass, this list is ordered.
+ Values []*Value
+
+ // The containing function
+ Func *Func
+
+ // Storage for Succs, Preds and Values.
+ succstorage [2]Edge
+ predstorage [4]Edge
+ valstorage [9]*Value
+}
+
+// Edge represents a CFG edge.
+// Example edges for b branching to either c or d.
+// (c and d have other predecessors.)
+// b.Succs = [{c,3}, {d,1}]
+// c.Preds = [?, ?, ?, {b,0}]
+// d.Preds = [?, {b,1}, ?]
+// These indexes allow us to edit the CFG in constant time.
+// In addition, it informs phi ops in degenerate cases like:
+// b:
+// if k then c else c
+// c:
+// v = Phi(x, y)
+// Then the indexes tell you whether x is chosen from
+// the if or else branch from b.
+// b.Succs = [{c,0},{c,1}]
+// c.Preds = [{b,0},{b,1}]
+// means x is chosen if k is true.
+type Edge struct {
+ // block edge goes to (in a Succs list) or from (in a Preds list)
+ b *Block
+ // index of reverse edge. Invariant:
+ // e := x.Succs[idx]
+ // e.b.Preds[e.i] = Edge{x,idx}
+ // and similarly for predecessors.
+ i int
+}
+
+func (e Edge) Block() *Block {
+ return e.b
+}
+func (e Edge) Index() int {
+ return e.i
+}
+func (e Edge) String() string {
+ return fmt.Sprintf("{%v,%d}", e.b, e.i)
+}
+
+// kind controls successors
+// ------------------------------------------
+// Exit [return mem] []
+// Plain [] [next]
+// If [boolean Value] [then, else]
+// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+type BlockKind int8
+
+// short form print
+func (b *Block) String() string {
+ return fmt.Sprintf("b%d", b.ID)
+}
+
+// long form print
+func (b *Block) LongString() string {
+ s := b.Kind.String()
+ if b.Aux != nil {
+ s += fmt.Sprintf(" {%s}", b.Aux)
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += fmt.Sprintf(" [%s]", t)
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c)
+ }
+ if len(b.Succs) > 0 {
+ s += " ->"
+ for _, c := range b.Succs {
+ s += " " + c.b.String()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ return s
+}
+
+// NumControls returns the number of non-nil control values the
+// block has.
+func (b *Block) NumControls() int {
+ if b.Controls[0] == nil {
+ return 0
+ }
+ if b.Controls[1] == nil {
+ return 1
+ }
+ return 2
+}
+
+// ControlValues returns a slice containing the non-nil control
+// values of the block. The index of each control value will be
+// the same as it is in the Controls property and can be used
+// in ReplaceControl calls.
+func (b *Block) ControlValues() []*Value {
+ if b.Controls[0] == nil {
+ return b.Controls[:0]
+ }
+ if b.Controls[1] == nil {
+ return b.Controls[:1]
+ }
+ return b.Controls[:2]
+}
+
+// SetControl removes all existing control values and then adds
+// the control value provided. The number of control values after
+// a call to SetControl will always be 1.
+func (b *Block) SetControl(v *Value) {
+ b.ResetControls()
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// ResetControls sets the number of controls for the block to 0.
+func (b *Block) ResetControls() {
+ if b.Controls[0] != nil {
+ b.Controls[0].Uses--
+ }
+ if b.Controls[1] != nil {
+ b.Controls[1].Uses--
+ }
+ b.Controls = [2]*Value{} // reset both controls to nil
+}
+
+// AddControl appends a control value to the existing list of control values.
+func (b *Block) AddControl(v *Value) {
+ i := b.NumControls()
+ b.Controls[i] = v // panics if array is full
+ v.Uses++
+}
+
+// ReplaceControl exchanges the existing control value at the index provided
+// for the new value. The index must refer to a valid control value.
+func (b *Block) ReplaceControl(i int, v *Value) {
+ b.Controls[i].Uses--
+ b.Controls[i] = v
+ v.Uses++
+}
+
+// CopyControls replaces the controls for this block with those from the
+// provided block. The provided block is not modified.
+func (b *Block) CopyControls(from *Block) {
+ if b == from {
+ return
+ }
+ b.ResetControls()
+ for _, c := range from.ControlValues() {
+ b.AddControl(c)
+ }
+}
+
+// Reset sets the block to the provided kind and clears all the blocks control
+// and auxiliary values. Other properties of the block, such as its successors,
+// predecessors and values are left unmodified.
+func (b *Block) Reset(kind BlockKind) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+}
+
+// resetWithControl resets b and adds control v.
+// It is equivalent to b.Reset(kind); b.AddControl(v),
+// except that it is one call instead of two and avoids a bounds check.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl(kind BlockKind, v *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// resetWithControl2 resets b and adds controls v and w.
+// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
+// except that it is one call instead of three and avoids two bounds checks.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ b.Controls[1] = w
+ v.Uses++
+ w.Uses++
+}
+
+// truncateValues truncates b.Values at the ith element, zeroing subsequent elements.
+// The values in b.Values after i must already have had their args reset,
+// to maintain correct value uses counts.
+func (b *Block) truncateValues(i int) {
+ tail := b.Values[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ b.Values = b.Values[:i]
+}
+
+// AddEdgeTo adds an edge from block b to block c. Used during building of the
+// SSA graph; do not use on an already-completed SSA graph.
+func (b *Block) AddEdgeTo(c *Block) {
+ i := len(b.Succs)
+ j := len(c.Preds)
+ b.Succs = append(b.Succs, Edge{c, j})
+ c.Preds = append(c.Preds, Edge{b, i})
+ b.Func.invalidateCFG()
+}
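+
+// For example (illustrative): if b already has one successor and c already
+// has two predecessors, b.AddEdgeTo(c) appends Edge{c, 2} to b.Succs and
+// Edge{b, 1} to c.Preds, preserving the cross-link invariant described on
+// the Edge type.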
+
+// removePred removes the ith input edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding successor edge.
+func (b *Block) removePred(i int) {
+ n := len(b.Preds) - 1
+ if i != n {
+ e := b.Preds[n]
+ b.Preds[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Succs[e.i].i = i
+ }
+ b.Preds[n] = Edge{}
+ b.Preds = b.Preds[:n]
+ b.Func.invalidateCFG()
+}
+
+// removeSucc removes the ith output edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding predecessor edge.
+func (b *Block) removeSucc(i int) {
+ n := len(b.Succs) - 1
+ if i != n {
+ e := b.Succs[n]
+ b.Succs[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Preds[e.i].i = i
+ }
+ b.Succs[n] = Edge{}
+ b.Succs = b.Succs[:n]
+ b.Func.invalidateCFG()
+}
+
+func (b *Block) swapSuccessors() {
+ if len(b.Succs) != 2 {
+ b.Fatalf("swapSuccessors with len(Succs)=%d", len(b.Succs))
+ }
+ e0 := b.Succs[0]
+ e1 := b.Succs[1]
+ b.Succs[0] = e1
+ b.Succs[1] = e0
+ e0.b.Preds[e0.i].i = 1
+ e1.b.Preds[e1.i].i = 0
+ b.Likely *= -1
+}
+
+// LackingPos indicates whether b is a block whose position should be inherited
+// from its successors. This is true if all the values within it have unreliable positions
+// and if it is "plain", meaning that there is no control flow that is also very likely
+// to correspond to a well-understood source position.
+func (b *Block) LackingPos() bool {
+	// Non-plain blocks are If or Defer, which both (1) have two successors,
+ // which might have different line numbers and (2) correspond to statements
+ // in the source code that have positions, so this case ought not occur anyway.
+ if b.Kind != BlockPlain {
+ return false
+ }
+ if b.Pos != src.NoXPos {
+ return false
+ }
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func (b *Block) AuxIntString() string {
+ switch b.Kind.AuxIntType() {
+ case "int8":
+ return fmt.Sprintf("%v", int8(b.AuxInt))
+ case "uint8":
+ return fmt.Sprintf("%v", uint8(b.AuxInt))
+ default: // type specified but not implemented - print as int64
+ return fmt.Sprintf("%v", b.AuxInt)
+ case "": // no aux int type
+ return ""
+ }
+}
+
+func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) }
+func (b *Block) Log() bool { return b.Func.Log() }
+func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) }
+
+type BranchPrediction int8
+
+const (
+ BranchUnlikely = BranchPrediction(-1)
+ BranchUnknown = BranchPrediction(0)
+ BranchLikely = BranchPrediction(+1)
+)
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
new file mode 100644
index 0000000..1d34f81
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -0,0 +1,449 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// branchelim tries to eliminate branches by
+// generating CondSelect instructions.
+//
+// Search for basic blocks that look like
+//
+// bb0 bb0
+// | \ / \
+// | bb1 or bb1 bb2 <- trivial if/else blocks
+// | / \ /
+// bb2 bb3
+//
+// where the intermediate blocks are mostly empty (with no side-effects);
+// rewrite Phis in the postdominator as CondSelects.
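+//
+// As a source-level sketch (illustrative only), code such as
+//
+//	x := a
+//	if cond {
+//		x = b
+//	}
+//
+// produces a Phi merging a and b in the join block; this pass rewrites that
+// Phi into a CondSelect so the backend can emit a conditional move instead
+// of a branch on the architectures handled below.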
+func branchelim(f *Func) {
+ // FIXME: add support for lowering CondSelects on more architectures
+ switch f.Config.arch {
+ case "arm64", "amd64", "wasm":
+ // implemented
+ default:
+ return
+ }
+
+ // Find all the values used in computing the address of any load.
+ // Typically these values have operations like AddPtr, Lsh64x64, etc.
+ loadAddr := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadAddr)
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
+ loadAddr.add(v.Args[0].ID)
+ case OpMove:
+ loadAddr.add(v.Args[1].ID)
+ }
+ }
+ }
+ po := f.postorder()
+ for {
+ n := loadAddr.size()
+ for _, b := range po {
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if !loadAddr.contains(v.ID) {
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Type.IsInteger() || a.Type.IsPtr() || a.Type.IsUnsafePtr() {
+ loadAddr.add(a.ID)
+ }
+ }
+ }
+ }
+ if loadAddr.size() == n {
+ break
+ }
+ }
+
+ change := true
+ for change {
+ change = false
+ for _, b := range f.Blocks {
+ change = elimIf(f, loadAddr, b) || elimIfElse(f, loadAddr, b) || change
+ }
+ }
+}
+
+func canCondSelect(v *Value, arch string, loadAddr *sparseSet) bool {
+ if loadAddr.contains(v.ID) {
+ // The result of the soon-to-be conditional move is used to compute a load address.
+ // We want to avoid generating a conditional move in this case
+ // because the load address would now be data-dependent on the condition.
+ // Previously it would only be control-dependent on the condition, which is faster
+ // if the branch predicts well (or possibly even if it doesn't, if the load will
+ // be an expensive cache miss).
+ // See issue #26306.
+ return false
+ }
+ // For now, stick to simple scalars that fit in registers
+ switch {
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ return false
+ case v.Type.IsPtrShaped():
+ return true
+ case v.Type.IsInteger():
+ if arch == "amd64" && v.Type.Size() < 2 {
+ // amd64 doesn't support CMOV with byte registers
+ return false
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// elimIf converts the one-way branch starting at dom in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
+ // See if dom is an If with one arm that
+ // is trivial and succeeded by the other
+ // successor of dom.
+ if dom.Kind != BlockIf || dom.Likely != BranchUnknown {
+ return false
+ }
+ var simple, post *Block
+ for i := range dom.Succs {
+ bb, other := dom.Succs[i].Block(), dom.Succs[i^1].Block()
+ if isLeafPlain(bb) && bb.Succs[0].Block() == other {
+ simple = bb
+ post = other
+ break
+ }
+ }
+ if simple == nil || len(post.Preds) != 2 || post == dom {
+ return false
+ }
+
+ // We've found our diamond CFG of blocks.
+ // Now decide if fusing 'simple' into dom+post
+ // looks profitable.
+
+ // Check that there are Phis, and that all of them
+ // can be safely rewritten to CondSelect.
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Pick some upper bound for the number of instructions
+ // we'd be willing to execute just to generate a dead
+ // argument to CondSelect. In the worst case, this is
+ // the number of useless instructions executed.
+ const maxfuseinsts = 2
+
+ if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
+ return false
+ }
+
+ // Replace Phi instructions in b with CondSelect instructions
+ swap := (post.Preds[0].Block() == dom) != (dom.Succs[0].Block() == post)
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(dom.Controls[0])
+ }
+
+ // Put all of the instructions into 'dom'
+ // and update the CFG appropriately.
+ dom.Kind = post.Kind
+ dom.CopyControls(post)
+ dom.Aux = post.Aux
+ dom.Succs = append(dom.Succs[:0], post.Succs...)
+ for i := range dom.Succs {
+ e := dom.Succs[i]
+ e.b.Preds[e.i].b = dom
+ }
+
+ // Try really hard to preserve statement marks attached to blocks.
+ simplePos := simple.Pos
+ postPos := post.Pos
+ simpleStmt := simplePos.IsStmt() == src.PosIsStmt
+ postStmt := postPos.IsStmt() == src.PosIsStmt
+
+ for _, v := range simple.Values {
+ v.Block = dom
+ }
+ for _, v := range post.Values {
+ v.Block = dom
+ }
+
+ // findBlockPos determines if b contains a stmt-marked value
+ // that has the same line number as the Pos for b itself.
+ // (i.e. is the position on b actually redundant?)
+ findBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ // See if there is a stmt-marked value already that matches simple.Pos (and perhaps post.Pos)
+ if pos.SameFileAndLine(v.Pos) && v.Pos.IsStmt() == src.PosIsStmt {
+ return true
+ }
+ }
+ return false
+ }
+ if simpleStmt {
+ simpleStmt = !findBlockPos(simple)
+ if !simpleStmt && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+
+ }
+ if postStmt {
+ postStmt = !findBlockPos(post)
+ }
+
+ // If simpleStmt and/or postStmt are still true, then try harder
+	// to find new homes for the corresponding statement marks.
+
+ // setBlockPos determines if b contains a can-be-statement value
+ // that has the same line number as the Pos for b itself, and
+ // puts a statement mark on it, and returns whether it succeeded
+ // in this operation.
+ setBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ if pos.SameFileAndLine(v.Pos) && !isPoorStatementOp(v.Op) {
+ v.Pos = v.Pos.WithIsStmt()
+ return true
+ }
+ }
+ return false
+ }
+ // If necessary and possible, add a mark to a value in simple
+ if simpleStmt {
+ if setBlockPos(simple) && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+ }
+ // If necessary and possible, add a mark to a value in post
+ if postStmt {
+ postStmt = !setBlockPos(post)
+ }
+
+ // Before giving up (this was added because it helps), try the end of "dom", and if that is not available,
+ // try the values in the successor block if it is uncomplicated.
+ if postStmt {
+ if dom.Pos.IsStmt() != src.PosIsStmt {
+ dom.Pos = postPos
+ } else {
+ // Try the successor block
+ if len(dom.Succs) == 1 && len(dom.Succs[0].Block().Preds) == 1 {
+ succ := dom.Succs[0].Block()
+ for _, v := range succ.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if postPos.SameFileAndLine(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ postStmt = false
+ break
+ }
+ // If postStmt still true, tag the block itself if possible
+ if postStmt && succ.Pos.IsStmt() != src.PosIsStmt {
+ succ.Pos = postPos
+ }
+ }
+ }
+ }
+
+ dom.Values = append(dom.Values, simple.Values...)
+ dom.Values = append(dom.Values, post.Values...)
+
+ // Trash 'post' and 'simple'
+ clobberBlock(post)
+ clobberBlock(simple)
+
+ f.invalidateCFG()
+ return true
+}
+
+// is this a BlockPlain with one predecessor?
+func isLeafPlain(b *Block) bool {
+ return b.Kind == BlockPlain && len(b.Preds) == 1
+}
+
+func clobberBlock(b *Block) {
+ b.Values = nil
+ b.Preds = nil
+ b.Succs = nil
+ b.Aux = nil
+ b.ResetControls()
+ b.Likely = BranchUnknown
+ b.Kind = BlockInvalid
+}
+
+// elimIfElse converts the two-way branch starting at b in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
+ // See if 'b' ends in an if/else: it should
+ // have two successors, both of which are BlockPlain
+ // and succeeded by the same block.
+ if b.Kind != BlockIf || b.Likely != BranchUnknown {
+ return false
+ }
+ yes, no := b.Succs[0].Block(), b.Succs[1].Block()
+ if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
+ return false
+ }
+ if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
+ return false
+ }
+ if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
+ return false
+ }
+ // block that postdominates the if/else
+ post := b.Succs[0].Block().Succs[0].Block()
+ if len(post.Preds) != 2 || post == b {
+ return false
+ }
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Don't generate CondSelects if branch is cheaper.
+ if !shouldElimIfElse(no, yes, post, f.Config.arch) {
+ return false
+ }
+
+ // now we're committed: rewrite each Phi as a CondSelect
+ swap := post.Preds[0].Block() != b.Succs[0].Block()
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(b.Controls[0])
+ }
+
+ // Move the contents of all of these
+ // blocks into 'b' and update CFG edges accordingly
+ b.Kind = post.Kind
+ b.CopyControls(post)
+ b.Aux = post.Aux
+ b.Succs = append(b.Succs[:0], post.Succs...)
+ for i := range b.Succs {
+ e := b.Succs[i]
+ e.b.Preds[e.i].b = b
+ }
+ for i := range post.Values {
+ post.Values[i].Block = b
+ }
+ for i := range yes.Values {
+ yes.Values[i].Block = b
+ }
+ for i := range no.Values {
+ no.Values[i].Block = b
+ }
+ b.Values = append(b.Values, yes.Values...)
+ b.Values = append(b.Values, no.Values...)
+ b.Values = append(b.Values, post.Values...)
+
+ // trash post, yes, and no
+ clobberBlock(yes)
+ clobberBlock(no)
+ clobberBlock(post)
+
+ f.invalidateCFG()
+ return true
+}
+
+// shouldElimIfElse reports whether estimated cost of eliminating branch
+// is lower than threshold.
+func shouldElimIfElse(no, yes, post *Block, arch string) bool {
+ switch arch {
+ default:
+ return true
+ case "amd64":
+ const maxcost = 2
+ phi := 0
+ other := 0
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+				// Each phi results in a CondSelect, which lowers into a CMOV;
+				// CMOV has latency >1 on most CPUs.
+ phi++
+ }
+ for _, x := range v.Args {
+ if x.Block == no || x.Block == yes {
+ other++
+ }
+ }
+ }
+ cost := phi * 1
+ if phi > 1 {
+ // If we have more than 1 phi and some values in post have args
+ // in yes or no blocks, we may have to recalculate condition, because
+ // those args may clobber flags. For now assume that all operations clobber flags.
+ cost += other * 1
+ }
+ return cost < maxcost
+ }
+}
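+
+// Worked example (illustrative): on amd64 with maxcost = 2, a diamond whose
+// post block carries a single Phi costs 1, which is under the threshold, so
+// the branch is eliminated. Two Phis already cost 2 (plus 1 for each of their
+// arguments computed in the yes/no arms), so the branch is kept.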
+
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions the execution of which need to be guarded with CPU
+// hardware feature checks. See issue #34950.
+func canSpeculativelyExecute(b *Block) bool {
+ // don't fuse memory ops, Phi ops, divides (can panic),
+ // or anything else with side-effects
+ for _, v := range b.Values {
+ if v.Op == OpPhi || isDivMod(v.Op) || v.Type.IsMemory() ||
+ v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects {
+ return false
+ }
+ }
+ return true
+}
+
+func isDivMod(op Op) bool {
+ switch op {
+ case OpDiv8, OpDiv8u, OpDiv16, OpDiv16u,
+ OpDiv32, OpDiv32u, OpDiv64, OpDiv64u, OpDiv128u,
+ OpDiv32F, OpDiv64F,
+ OpMod8, OpMod8u, OpMod16, OpMod16u,
+ OpMod32, OpMod32u, OpMod64, OpMod64u:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/branchelim_test.go b/src/cmd/compile/internal/ssa/branchelim_test.go
new file mode 100644
index 0000000..20fa84d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim_test.go
@@ -0,0 +1,172 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+// Test that a trivial 'if' is eliminated
+func TestBranchElimIf(t *testing.T) {
+ var testData = []struct {
+ arch string
+ intType string
+ ok bool
+ }{
+ {"arm64", "int32", true},
+ {"amd64", "int32", true},
+ {"amd64", "int8", false},
+ }
+
+ for _, data := range testData {
+ t.Run(data.arch+"/"+data.intType, func(t *testing.T) {
+ c := testConfigArch(t, data.arch)
+ boolType := c.config.Types.Bool
+ var intType *types.Type
+ switch data.intType {
+ case "int32":
+ intType = c.config.Types.Int32
+ case "int8":
+ intType = c.config.Types.Int8
+ default:
+ t.Fatal("invalid integer type:", data.intType)
+ }
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if data.ok {
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ } else {
+ if len(fun.f.Blocks) != 3 {
+					t.Fatalf("expected 3 blocks after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ }
+ })
+ }
+}
+
+// Test that a trivial if/else is eliminated
+func TestBranchElimIfElse(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b3",
+ Goto("b4")),
+ Bloc("b4",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ })
+ }
+}
+
+// Test that an if/else CFG that loops back
+// into itself does *not* get eliminated.
+func TestNoBranchElimLoop(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+
+ // The control flow here is totally bogus,
+ // but a dead cycle seems like the only plausible
+ // way to arrive at a diamond CFG that is also a loop.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("const3", OpConst32, intType, 3, nil),
+ Goto("b5")),
+ Bloc("b2",
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"),
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ Goto("b2")),
+ Bloc("b4",
+ Goto("b2")),
+ Bloc("b5",
+ Exit("start")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 5 {
+				t.Errorf("expected 5 blocks after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpPhi {
+				t.Errorf("expected phi op to remain OpPhi; found op %s", fun.values["phi"].Op)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/cache.go b/src/cmd/compile/internal/ssa/cache.go
new file mode 100644
index 0000000..dbec2e1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cache.go
@@ -0,0 +1,81 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "sort"
+)
+
+// A Cache holds reusable compiler state.
+// It is intended to be re-used for multiple Func compilations.
+type Cache struct {
+ // Storage for low-numbered values and blocks.
+ values [2000]Value
+ blocks [200]Block
+ locs [2000]Location
+
+ // Reusable stackAllocState.
+ // See stackalloc.go's {new,put}StackAllocState.
+ stackAllocState *stackAllocState
+
+ domblockstore []ID // scratch space for computing dominators
+ scrSparseSet []*sparseSet // scratch sparse sets to be re-used.
+ scrSparseMap []*sparseMap // scratch sparse maps to be re-used.
+ scrPoset []*poset // scratch poset to be reused
+ // deadcode contains reusable slices specifically for the deadcode pass.
+ // It gets special treatment because of the frequency with which it is run.
+ deadcode struct {
+ liveOrderStmts []*Value
+ live []bool
+ q []*Value
+ }
+ // Reusable regalloc state.
+ regallocValues []valState
+
+ ValueToProgAfter []*obj.Prog
+ debugState debugState
+
+ Liveness interface{} // *gc.livenessFuncCache
+}
+
+func (c *Cache) Reset() {
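+	// Reset assumes the cached arrays are filled densely from the front,
+	// so the first zero-ID value/block (or nil location) marks the
+	// high-water mark; sort.Search finds that point and only the used
+	// prefix needs to be cleared.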
+ nv := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 })
+ xv := c.values[:nv]
+ for i := range xv {
+ xv[i] = Value{}
+ }
+ nb := sort.Search(len(c.blocks), func(i int) bool { return c.blocks[i].ID == 0 })
+ xb := c.blocks[:nb]
+ for i := range xb {
+ xb[i] = Block{}
+ }
+ nl := sort.Search(len(c.locs), func(i int) bool { return c.locs[i] == nil })
+ xl := c.locs[:nl]
+ for i := range xl {
+ xl[i] = nil
+ }
+
+ // regalloc sets the length of c.regallocValues to whatever it may use,
+ // so clear according to length.
+ for i := range c.regallocValues {
+ c.regallocValues[i] = valState{}
+ }
+
+ // liveOrderStmts gets used multiple times during compilation of a function.
+ // We don't know where the high water mark was, so reslice to cap and search.
+ c.deadcode.liveOrderStmts = c.deadcode.liveOrderStmts[:cap(c.deadcode.liveOrderStmts)]
+ no := sort.Search(len(c.deadcode.liveOrderStmts), func(i int) bool { return c.deadcode.liveOrderStmts[i] == nil })
+ xo := c.deadcode.liveOrderStmts[:no]
+ for i := range xo {
+ xo[i] = nil
+ }
+ c.deadcode.q = c.deadcode.q[:cap(c.deadcode.q)]
+ nq := sort.Search(len(c.deadcode.q), func(i int) bool { return c.deadcode.q[i] == nil })
+ xq := c.deadcode.q[:nq]
+ for i := range xq {
+ xq[i] = nil
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
new file mode 100644
index 0000000..2dade7a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -0,0 +1,597 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj/s390x"
+ "math"
+ "math/bits"
+)
+
+// checkFunc checks invariants of f.
+func checkFunc(f *Func) {
+ blockMark := make([]bool, f.NumBlocks())
+ valueMark := make([]bool, f.NumValues())
+
+ for _, b := range f.Blocks {
+ if blockMark[b.ID] {
+ f.Fatalf("block %s appears twice in %s!", b, f.Name)
+ }
+ blockMark[b.ID] = true
+ if b.Func != f {
+ f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name)
+ }
+
+ for i, e := range b.Preds {
+ if se := e.b.Succs[e.i]; se.b != b || se.i != i {
+ f.Fatalf("block pred/succ not crosslinked correctly %d:%s %d:%s", i, b, se.i, se.b)
+ }
+ }
+ for i, e := range b.Succs {
+ if pe := e.b.Preds[e.i]; pe.b != b || pe.i != i {
+ f.Fatalf("block succ/pred not crosslinked correctly %d:%s %d:%s", i, b, pe.i, pe.b)
+ }
+ }
+
+ switch b.Kind {
+ case BlockExit:
+ if len(b.Succs) != 0 {
+ f.Fatalf("exit block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("exit block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("exit block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRet:
+ if len(b.Succs) != 0 {
+ f.Fatalf("ret block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("ret block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("ret block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRetJmp:
+ if len(b.Succs) != 0 {
+ f.Fatalf("retjmp block %s len(Succs)==%d, want 0", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("retjmp block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ if b.Aux == nil {
+ f.Fatalf("retjmp block %s has nil Aux field", b)
+ }
+ case BlockPlain:
+ if len(b.Succs) != 1 {
+ f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain block %s has non-nil control %s", b, b.Controls[0].LongString())
+ }
+ case BlockIf:
+ if len(b.Succs) != 2 {
+ f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("if block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsBoolean() {
+ f.Fatalf("if block %s has non-bool control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockDefer:
+ if len(b.Succs) != 2 {
+ f.Fatalf("defer block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("defer block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("defer block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockFirst:
+ if len(b.Succs) != 2 {
+ f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain/dead block %s has a control value", b)
+ }
+ }
+ if len(b.Succs) != 2 && b.Likely != BranchUnknown {
+ f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs))
+ }
+
+ for _, v := range b.Values {
+ // Check to make sure argument count makes sense (argLen of -1 indicates
+ // variable length args)
+ nArgs := opcodeTable[v.Op].argLen
+ if nArgs != -1 && int32(len(v.Args)) != nArgs {
+ f.Fatalf("value %s has %d args, expected %d", v.LongString(),
+ len(v.Args), nArgs)
+ }
+
+ // Check to make sure aux values make sense.
+ canHaveAux := false
+ canHaveAuxInt := false
+ // TODO: enforce types of Aux in this switch (like auxString does below)
+ switch opcodeTable[v.Op].auxType {
+ case auxNone:
+ case auxBool:
+ if v.AuxInt < 0 || v.AuxInt > 1 {
+ f.Fatalf("bad bool AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt8:
+ if v.AuxInt != int64(int8(v.AuxInt)) {
+ f.Fatalf("bad int8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt16:
+ if v.AuxInt != int64(int16(v.AuxInt)) {
+ f.Fatalf("bad int16 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt32:
+ if v.AuxInt != int64(int32(v.AuxInt)) {
+ f.Fatalf("bad int32 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt64, auxARM64BitField:
+ canHaveAuxInt = true
+ case auxInt128:
+ // AuxInt must be zero, so leave canHaveAuxInt set to false.
+ case auxUInt8:
+ if v.AuxInt != int64(uint8(v.AuxInt)) {
+ f.Fatalf("bad uint8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxFloat32:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ if !isExactFloat32(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
+ }
+ case auxFloat64:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ case auxString:
+ if _, ok := v.Aux.(string); !ok {
+ f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxCallOff:
+ canHaveAuxInt = true
+ fallthrough
+ case auxCall:
+ if ac, ok := v.Aux.(*AuxCall); ok {
+ if v.Op == OpStaticCall && ac.Fn == nil {
+ f.Fatalf("value %v has *AuxCall with nil Fn", v)
+ }
+ } else {
+ f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxSym, auxTyp:
+ canHaveAux = true
+ case auxSymOff, auxSymValAndOff, auxTypSize:
+ canHaveAuxInt = true
+ canHaveAux = true
+ case auxCCop:
+ if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" {
+					f.Fatalf("value %v has an AuxInt value that is not a valid opcode", v)
+ }
+ canHaveAuxInt = true
+ case auxS390XCCMask:
+ if _, ok := v.Aux.(s390x.CCMask); !ok {
+ f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxS390XRotateParams:
+ if _, ok := v.Aux.(s390x.RotateParams); !ok {
+ f.Fatalf("bad type %T for S390XRotateParams in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxFlagConstant:
+ if v.AuxInt < 0 || v.AuxInt > 15 {
+ f.Fatalf("bad FlagConstant AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ default:
+ f.Fatalf("unknown aux type for %s", v.Op)
+ }
+ if !canHaveAux && v.Aux != nil {
+ f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux)
+ }
+ if !canHaveAuxInt && v.AuxInt != 0 {
+ f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt)
+ }
+
+ for i, arg := range v.Args {
+ if arg == nil {
+ f.Fatalf("value %s has nil arg", v.LongString())
+ }
+ if v.Op != OpPhi {
+ // For non-Phi ops, memory args must be last, if present
+ if arg.Type.IsMemory() && i != len(v.Args)-1 {
+ f.Fatalf("value %s has non-final memory arg (%d < %d)", v.LongString(), i, len(v.Args)-1)
+ }
+ }
+ }
+
+ if valueMark[v.ID] {
+ f.Fatalf("value %s appears twice!", v.LongString())
+ }
+ valueMark[v.ID] = true
+
+ if v.Block != b {
+ f.Fatalf("%s.block != %s", v, b)
+ }
+ if v.Op == OpPhi && len(v.Args) != len(b.Preds) {
+ f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
+ }
+
+ if v.Op == OpAddr {
+ if len(v.Args) == 0 {
+ f.Fatalf("no args for OpAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSB {
+ f.Fatalf("bad arg to OpAddr %v", v)
+ }
+ }
+
+ if v.Op == OpLocalAddr {
+ if len(v.Args) != 2 {
+ f.Fatalf("wrong # of args for OpLocalAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSP {
+ f.Fatalf("bad arg 0 to OpLocalAddr %v", v)
+ }
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 to OpLocalAddr %v", v)
+ }
+ }
+
+ if f.RegAlloc != nil && f.Config.SoftFloat && v.Type.IsFloat() {
+ f.Fatalf("unexpected floating-point type %v", v.LongString())
+ }
+
+ // Check types.
+ // TODO: more type checks?
+ switch c := f.Config; v.Op {
+ case OpSP, OpSB:
+ if v.Type != c.Types.Uintptr {
+ f.Fatalf("bad %s type: want uintptr, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpStringLen:
+ if v.Type != c.Types.Int {
+ f.Fatalf("bad %s type: want int, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpLoad:
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+ v.Op, v.Args[1].Type.String())
+ }
+ case OpStore:
+ if !v.Type.IsMemory() {
+ f.Fatalf("bad %s type: want mem, have %s",
+ v.Op, v.Type.String())
+ }
+ if !v.Args[2].Type.IsMemory() {
+ f.Fatalf("bad arg 2 type to %s: want mem, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpCondSelect:
+ if !v.Args[2].Type.IsBoolean() {
+ f.Fatalf("bad arg 2 type to %s: want boolean, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpAddPtr:
+ if !v.Args[0].Type.IsPtrShaped() && v.Args[0].Type != c.Types.Uintptr {
+ f.Fatalf("bad arg 0 type to %s: want ptr, have %s", v.Op, v.Args[0].LongString())
+ }
+ if !v.Args[1].Type.IsInteger() {
+ f.Fatalf("bad arg 1 type to %s: want integer, have %s", v.Op, v.Args[1].LongString())
+ }
+
+ }
+
+ // TODO: check for cycles in values
+ }
+ }
+
+ // Check to make sure all Blocks referenced are in the function.
+ if !blockMark[f.Entry.ID] {
+ f.Fatalf("entry block %v is missing", f.Entry)
+ }
+ for _, b := range f.Blocks {
+ for _, c := range b.Preds {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("predecessor block %v for %v is missing", c, b)
+ }
+ }
+ for _, c := range b.Succs {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("successor block %v for %v is missing", c, b)
+ }
+ }
+ }
+
+ if len(f.Entry.Preds) > 0 {
+ f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds)
+ }
+
+ // Check to make sure all Values referenced are in the function.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !valueMark[a.ID] {
+ f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !valueMark[c.ID] {
+ f.Fatalf("control value for %s is missing: %v", b, c)
+ }
+ }
+ }
+ for b := f.freeBlocks; b != nil; b = b.succstorage[0].b {
+ if blockMark[b.ID] {
+ f.Fatalf("used block b%d in free list", b.ID)
+ }
+ }
+ for v := f.freeValues; v != nil; v = v.argstorage[0] {
+ if valueMark[v.ID] {
+ f.Fatalf("used value v%d in free list", v.ID)
+ }
+ }
+
+ // Check to make sure all args dominate uses.
+ if f.RegAlloc == nil {
+ // Note: regalloc introduces non-dominating args.
+ // See TODO in regalloc.go.
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, arg := range v.Args {
+ x := arg.Block
+ y := b
+ if v.Op == OpPhi {
+ y = b.Preds[i].b
+ }
+ if !domCheck(f, sdom, x, y) {
+ f.Fatalf("arg %d of value %s does not dominate, arg=%s", i, v.LongString(), arg.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !domCheck(f, sdom, c.Block, b) {
+ f.Fatalf("control value %s for %s doesn't dominate", c, b)
+ }
+ }
+ }
+ }
+
+ // Check loop construction
+ if f.RegAlloc == nil && f.pass != nil { // non-nil pass allows better-targeted debug printing
+ ln := f.loopnest()
+ if !ln.hasIrreducible {
+ po := f.postorder() // use po to avoid unreachable blocks.
+ for _, b := range po {
+ for _, s := range b.Succs {
+ bb := s.Block()
+ if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header {
+ f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String())
+ }
+ if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) {
+ f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String())
+ }
+ }
+ }
+ }
+ }
+
+ // Check use counts
+ uses := make([]int32, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ uses[a.ID]++
+ }
+ }
+ for _, c := range b.ControlValues() {
+ uses[c.ID]++
+ }
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses != uses[v.ID] {
+ f.Fatalf("%s has %d uses, but has Uses=%d", v, uses[v.ID], v.Uses)
+ }
+ }
+ }
+
+ memCheck(f)
+}
+
+func memCheck(f *Func) {
+ // Check that if a tuple has a memory type, it is second.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsTuple() && v.Type.FieldType(0).IsMemory() {
+ f.Fatalf("memory is first in a tuple: %s\n", v.LongString())
+ }
+ }
+ }
+
+ // Single live memory checks.
+ // These checks only work if there are no memory copies.
+ // (Memory copies introduce ambiguity about which mem value is really live.
+	// Probably fixable, but it's easier to avoid the problem.)
+ // For the same reason, disable this check if some memory ops are unused.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if (v.Op == OpCopy || v.Uses == 0) && v.Type.IsMemory() {
+ return
+ }
+ }
+ if b != f.Entry && len(b.Preds) == 0 {
+ return
+ }
+ }
+
+ // Compute live memory at the end of each block.
+ lastmem := make([]*Value, f.NumBlocks())
+ ss := newSparseSet(f.NumValues())
+ for _, b := range f.Blocks {
+ // Mark overwritten memory values. Those are args of other
+ // ops that generate memory values.
+ ss.clear()
+ for _, v := range b.Values {
+ if v.Op == OpPhi || !v.Type.IsMemory() {
+ continue
+ }
+ if m := v.MemoryArg(); m != nil {
+ ss.add(m.ID)
+ }
+ }
+ // There should be at most one remaining unoverwritten memory value.
+ for _, v := range b.Values {
+ if !v.Type.IsMemory() {
+ continue
+ }
+ if ss.contains(v.ID) {
+ continue
+ }
+ if lastmem[b.ID] != nil {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], v)
+ }
+ lastmem[b.ID] = v
+ }
+ // If there is no remaining memory value, that means there was no memory update.
+ // Take any memory arg.
+ if lastmem[b.ID] == nil {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ continue
+ }
+ m := v.MemoryArg()
+ if m == nil {
+ continue
+ }
+ if lastmem[b.ID] != nil && lastmem[b.ID] != m {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], m)
+ }
+ lastmem[b.ID] = m
+ }
+ }
+ }
+ // Propagate last live memory through storeless blocks.
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ if lastmem[b.ID] != nil {
+ continue
+ }
+ for _, e := range b.Preds {
+ p := e.b
+ if lastmem[p.ID] != nil {
+ lastmem[b.ID] = lastmem[p.ID]
+ changed = true
+ break
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Check merge points.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ for i, a := range v.Args {
+ if a != lastmem[b.Preds[i].b.ID] {
+ f.Fatalf("inconsistent memory phi %s %d %s %s", v.LongString(), i, a, lastmem[b.Preds[i].b.ID])
+ }
+ }
+ }
+ }
+ }
+
+ // Check that only one memory is live at any point.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ var mem *Value // the current live memory in the block
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ continue
+ }
+ if mem == nil && len(b.Preds) > 0 {
+ // If no mem phi, take mem of any predecessor.
+ mem = lastmem[b.Preds[0].b.ID]
+ }
+ for _, a := range v.Args {
+ if a.Type.IsMemory() && a != mem {
+ f.Fatalf("two live mems @ %s: %s and %s", v, mem, a)
+ }
+ }
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ }
+ }
+ }
+
+ // Check that after scheduling, phis are always first in the block.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ seenNonPhi := false
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpPhi:
+ if seenNonPhi {
+ f.Fatalf("phi after non-phi @ %s: %s", b, v)
+ }
+ default:
+ seenNonPhi = true
+ }
+ }
+ }
+ }
+}
+
+// domCheck reports whether x dominates y (including x==y).
+func domCheck(f *Func, sdom SparseTree, x, y *Block) bool {
+ if !sdom.IsAncestorEq(f.Entry, y) {
+ // unreachable - ignore
+ return true
+ }
+ return sdom.IsAncestorEq(x, y)
+}
+
+// isExactFloat32 reports whether x can be exactly represented as a float32.
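+// For example (illustrative), 0.5 and 1.5 are exact float32 values,
+// while 0.1 and 1.0/3.0 are not.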
+func isExactFloat32(x float64) bool {
+ // Check the mantissa is in range.
+ if bits.TrailingZeros64(math.Float64bits(x)) < 52-23 {
+ return false
+ }
+ // Check the exponent is in range. The mantissa check above is sufficient for NaN values.
+ return math.IsNaN(x) || x == float64(float32(x))
+}
diff --git a/src/cmd/compile/internal/ssa/checkbce.go b/src/cmd/compile/internal/ssa/checkbce.go
new file mode 100644
index 0000000..6a9ce2b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/checkbce.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/logopt"
+
+// checkbce prints all bounds checks that are present in the function.
+// Useful for finding regressions. checkbce is only activated with the
+// corresponding debug options, so it's off by default.
+// See test/checkbce.go
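+//
+// For example (illustrative; the exact flag name comes from the pass list
+// in compile.go), building with
+//
+//	go tool compile -d=ssa/check_bce/debug=1 x.go
+//
+// prints a line such as "Found IsInBounds" for each remaining bounds check.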
+func checkbce(f *Func) {
+ if f.pass.debug <= 0 && !logopt.Enabled() {
+ return
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "Found %v", v.Op)
+ }
+ if logopt.Enabled() {
+ if v.Op == OpIsInBounds {
+ logopt.LogOpt(v.Pos, "isInBounds", "checkbce", f.Name)
+ }
+ if v.Op == OpIsSliceInBounds {
+ logopt.LogOpt(v.Pos, "isSliceInBounds", "checkbce", f.Name)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
new file mode 100644
index 0000000..63994d1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -0,0 +1,573 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "hash/crc32"
+ "log"
+ "math/rand"
+ "os"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+// Compile is the main entry point for this package.
+// Compile modifies f so that on return:
+// · all Values in f map to 0 or 1 assembly instructions of the target architecture
+// · the order of f.Blocks is the order to emit the Blocks
+// · the order of b.Values is the order to emit the Values in each Block
+// · f has a non-nil regAlloc field
+func Compile(f *Func) {
+ // TODO: debugging - set flags to control verbosity of compiler,
+ // which phases to dump IR before/after, etc.
+ if f.Log() {
+ f.Logf("compiling %s\n", f.Name)
+ }
+
+ var rnd *rand.Rand
+ if checkEnabled {
+ seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
+ rnd = rand.New(rand.NewSource(seed))
+ }
+
+ // hook to print function & phase if panic happens
+ phaseName := "init"
+ defer func() {
+ if phaseName != "" {
+ err := recover()
+ stack := make([]byte, 16384)
+ n := runtime.Stack(stack, false)
+ stack = stack[:n]
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.flushPhases()
+ }
+ f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
+ }
+ }()
+
+ // Run all the passes
+ if f.Log() {
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase("start", "start")
+ if BuildDump != "" && BuildDump == f.Name {
+ f.dumpFile("build")
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ const logMemStats = false
+ for _, p := range passes {
+ if !f.Config.optimize && !p.required || p.disabled {
+ continue
+ }
+ f.pass = &p
+ phaseName = p.name
+ if f.Log() {
+ f.Logf(" pass %s begin\n", p.name)
+ }
+ // TODO: capture logging during this pass, add it to the HTML
+ var mStart runtime.MemStats
+ if logMemStats || p.mem {
+ runtime.ReadMemStats(&mStart)
+ }
+
+ if checkEnabled && !f.scheduled {
+ // Test that we don't depend on the value order, by randomizing
+ // the order of values in each block. See issue 18169.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values)-1; i++ {
+ j := i + rnd.Intn(len(b.Values)-i)
+ b.Values[i], b.Values[j] = b.Values[j], b.Values[i]
+ }
+ }
+ }
+
+ tStart := time.Now()
+ p.fn(f)
+ tEnd := time.Now()
+
+ // Need something less crude than "Log the whole intermediate result".
+ if f.Log() || f.HTMLWriter != nil {
+ time := tEnd.Sub(tStart).Nanoseconds()
+ var stats string
+ if logMemStats {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes)
+ } else {
+ stats = fmt.Sprintf("[%d ns]", time)
+ }
+
+ if f.Log() {
+ f.Logf(" pass %s end %s\n", p.name, stats)
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase(phaseName, fmt.Sprintf("%s <span class=\"stats\">%s</span>", phaseName, stats))
+ }
+ if p.time || p.mem {
+ // Surround timing information w/ enough context to allow comparisons.
+ time := tEnd.Sub(tStart).Nanoseconds()
+ if p.time {
+ f.LogStat("TIME(ns)", time)
+ }
+ if p.mem {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
+ }
+ }
+ if p.dump != nil && p.dump[f.Name] {
+ // Dump function to appropriately named file
+ f.dumpFile(phaseName)
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ }
+
+ if f.HTMLWriter != nil {
+ // Ensure we write any pending phases to the html
+ f.HTMLWriter.flushPhases()
+ }
+
+ if f.ruleMatches != nil {
+ var keys []string
+ for key := range f.ruleMatches {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "%s: ", f.Name)
+ for _, key := range keys {
+ fmt.Fprintf(buf, "%s=%d ", key, f.ruleMatches[key])
+ }
+ fmt.Fprint(buf, "\n")
+ fmt.Print(buf.String())
+ }
+
+ // Squash error printing defer
+ phaseName = ""
+}
+
+// dumpFile creates a file from the phase name and function name
+// Dumping is done to files to avoid buffering huge strings before
+// output.
+func (f *Func) dumpFile(phaseName string) {
+ f.dumpFileSeq++
+ fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName)
+ fname = strings.Replace(fname, " ", "_", -1)
+ fname = strings.Replace(fname, "/", "_", -1)
+ fname = strings.Replace(fname, ":", "_", -1)
+
+ fi, err := os.Create(fname)
+ if err != nil {
+ f.Warnl(src.NoXPos, "Unable to create after-phase dump file %s", fname)
+ return
+ }
+
+ p := stringFuncPrinter{w: fi}
+ fprintFunc(p, f)
+ fi.Close()
+}
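+
+// For example (illustrative): the third dump of the "generic cse" phase for a
+// function named "foo" is written to "foo_03__generic_cse.dump" in the
+// current directory.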
+
+type pass struct {
+ name string
+ fn func(*Func)
+ required bool
+ disabled bool
+ time bool // report time to run pass
+ mem bool // report mem stats to run pass
+ stats int // pass reports own "stats" (e.g., branches removed)
+ debug int // pass performs some debugging. =1 should be in error-testing-friendly Warnl format.
+ test int // pass-specific ad-hoc option, perhaps useful in development
+ dump map[string]bool // dump if function name matches
+}
+
+func (p *pass) addDump(s string) {
+ if p.dump == nil {
+ p.dump = make(map[string]bool)
+ }
+ p.dump[s] = true
+}
+
+func (p *pass) String() string {
+ if p == nil {
+ return "nil pass"
+ }
+ return p.name
+}
+
+// Run consistency checker between each phase
+var (
+ checkEnabled = false
+ checkRandSeed = 0
+)
+
+// Debug output
+var IntrinsicsDebug int
+var IntrinsicsDisable bool
+
+var BuildDebug int
+var BuildTest int
+var BuildStats int
+var BuildDump string // name of function to dump after initial build of ssa
+
+// PhaseOption sets the specified flag in the specified ssa phase,
+// returning empty string if this was successful or a string explaining
+// the error if it was not.
+// A version of the phase name with "_" replaced by " " is also checked for a match.
+// If the phase name begins with a '~' then the rest of the underscores-replaced-with-blanks
+// version is used as a regular expression to match the phase name(s).
+//
+// Special cases that have turned out to be useful:
+// ssa/check/on enables checking after each phase
+// ssa/all/time enables time reporting for all phases
+//
+// See gc/lex.go for dissection of the option string.
+// Example uses:
+//
+// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash
+//
+// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash
+//
+func PhaseOption(phase, flag string, val int, valString string) string {
+ switch phase {
+ case "", "help":
+ lastcr := 0
+ phasenames := " check, all, build, intrinsics"
+ for _, p := range passes {
+ pn := strings.Replace(p.name, " ", "_", -1)
+ if len(pn)+len(phasenames)-lastcr > 70 {
+ phasenames += "\n "
+ lastcr = len(phasenames)
+ phasenames += pn
+ } else {
+ phasenames += ", " + pn
+ }
+ }
+ return `PhaseOptions usage:
+
+ go tool compile -d=ssa/<phase>/<flag>[=<value>|<function_name>]
+
+where:
+
+- <phase> is one of:
+` + phasenames + `
+
+- <flag> is one of:
+ on, off, debug, mem, time, test, stats, dump, seed
+
+- <value> defaults to 1
+
+- <function_name> is required for the "dump" flag, and specifies the
+  name of the function to dump after <phase>
+
+Phase "all" supports flags "time", "mem", and "dump".
+Phase "intrinsics" supports flags "on", "off", and "debug".
+
+If the "dump" flag is specified, the output is written to a file named
+<function_name>_<seq>__<phase>.dump; otherwise it is directed to stdout.
+
+Examples:
+
+ -d=ssa/check/on
+enables checking after each phase
+
+ -d=ssa/check/seed=1234
+enables checking after each phase, using 1234 to seed the PRNG
+used for value order randomization
+
+ -d=ssa/all/time
+enables time reporting for all phases
+
+ -d=ssa/prove/debug=2
+sets debugging level to 2 in the prove pass
+
+Multiple flags can be passed at once, by separating them with
+commas. For example:
+
+ -d=ssa/check/on,ssa/all/time
+`
+ }
+
+ if phase == "check" {
+ switch flag {
+ case "on":
+ checkEnabled = val != 0
+ debugPoset = checkEnabled // also turn on advanced self-checking in prove's datastructure
+ return ""
+ case "off":
+ checkEnabled = val == 0
+ debugPoset = checkEnabled
+ return ""
+ case "seed":
+ checkEnabled = true
+ checkRandSeed = val
+ debugPoset = checkEnabled
+ return ""
+ }
+ }
+
+ alltime := false
+ allmem := false
+ alldump := false
+ if phase == "all" {
+ switch flag {
+ case "time":
+ alltime = val != 0
+ case "mem":
+ allmem = val != 0
+ case "dump":
+ alldump = val != 0
+ if alldump {
+ BuildDump = valString
+ }
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ }
+
+ if phase == "intrinsics" {
+ switch flag {
+ case "on":
+ IntrinsicsDisable = val == 0
+ case "off":
+ IntrinsicsDisable = val != 0
+ case "debug":
+ IntrinsicsDebug = val
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ return ""
+ }
+ if phase == "build" {
+ switch flag {
+ case "debug":
+ BuildDebug = val
+ case "test":
+ BuildTest = val
+ case "stats":
+ BuildStats = val
+ case "dump":
+ BuildDump = valString
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ return ""
+ }
+
+ underphase := strings.Replace(phase, "_", " ", -1)
+ var re *regexp.Regexp
+ if phase[0] == '~' {
+ r, ok := regexp.Compile(underphase[1:])
+ if ok != nil {
+ return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag)
+ }
+ re = r
+ }
+ matchedOne := false
+ for i, p := range passes {
+ if phase == "all" {
+ p.time = alltime
+ p.mem = allmem
+ if alldump {
+ p.addDump(valString)
+ }
+ passes[i] = p
+ matchedOne = true
+ } else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) {
+ switch flag {
+ case "on":
+ p.disabled = val == 0
+ case "off":
+ p.disabled = val != 0
+ case "time":
+ p.time = val != 0
+ case "mem":
+ p.mem = val != 0
+ case "debug":
+ p.debug = val
+ case "stats":
+ p.stats = val
+ case "test":
+ p.test = val
+ case "dump":
+ p.addDump(valString)
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ if p.disabled && p.required {
+ return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase)
+ }
+ passes[i] = p
+ matchedOne = true
+ }
+ }
+ if matchedOne {
+ return ""
+ }
+ return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase)
+}
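To make the matching rules concrete, here is a rough package-internal test sketch exercising PhaseOption; the phase and flag combinations are illustrative only (not part of this patch), and note that the calls mutate the shared passes table.

    package ssa

    import "testing"

    func TestPhaseOptionSketch(t *testing.T) {
        // Setting a flag on an existing pass returns the empty string.
        if msg := PhaseOption("generic cse", "time", 1, ""); msg != "" {
            t.Errorf("unexpected error: %s", msg)
        }
        // An unknown phase is reported as an error string rather than a panic.
        if msg := PhaseOption("no such pass", "time", 1, ""); msg == "" {
            t.Errorf("expected an error for an unknown phase")
        }
        // A leading '~' turns the name into a regexp matching several passes.
        if msg := PhaseOption("~deadcode", "debug", 1, ""); msg != "" {
            t.Errorf("unexpected error: %s", msg)
        }
    }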
+
+// list of passes for the compiler
+var passes = [...]pass{
+ // TODO: combine phielim and copyelim into a single pass?
+ {name: "number lines", fn: numberLines, required: true},
+ {name: "early phielim", fn: phielim},
+ {name: "early copyelim", fn: copyelim},
+ {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
+ {name: "short circuit", fn: shortcircuit},
+ {name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering
+ {name: "decompose user", fn: decomposeUser, required: true},
+ {name: "pre-opt deadcode", fn: deadcode},
+ {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
+ {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values
+ {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt
+ {name: "generic cse", fn: cse},
+ {name: "phiopt", fn: phiopt},
+ {name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
+ {name: "nilcheckelim", fn: nilcheckelim},
+ {name: "prove", fn: prove},
+ {name: "early fuse", fn: fuseEarly},
+ {name: "decompose builtin", fn: decomposeBuiltIn, required: true},
+ {name: "expand calls", fn: expandCalls, required: true},
+ {name: "softfloat", fn: softfloat, required: true},
+ {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
+ {name: "dead auto elim", fn: elimDeadAutosGeneric},
+ {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
+ {name: "check bce", fn: checkbce},
+ {name: "branchelim", fn: branchelim},
+ {name: "late fuse", fn: fuseLate},
+ {name: "dse", fn: dse},
+ {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
+ {name: "insert resched checks", fn: insertLoopReschedChecks,
+ disabled: objabi.Preemptibleloops_enabled == 0}, // insert resched checks in loops.
+ {name: "lower", fn: lower, required: true},
+ {name: "addressing modes", fn: addressingModes, required: false},
+ {name: "lowered deadcode for cse", fn: deadcode}, // deadcode immediately before CSE avoids CSE making dead values live again
+ {name: "lowered cse", fn: cse},
+ {name: "elim unread autos", fn: elimUnreadAutos},
+ {name: "tighten tuple selectors", fn: tightenTupleSelectors, required: true},
+ {name: "lowered deadcode", fn: deadcode, required: true},
+ {name: "checkLower", fn: checkLower, required: true},
+ {name: "late phielim", fn: phielim},
+ {name: "late copyelim", fn: copyelim},
+ {name: "tighten", fn: tighten}, // move values closer to their uses
+ {name: "late deadcode", fn: deadcode},
+ {name: "critical", fn: critical, required: true}, // remove critical edges
+ {name: "phi tighten", fn: phiTighten}, // place rematerializable phi args near uses to reduce value lifetimes
+ {name: "likelyadjust", fn: likelyadjust},
+ {name: "layout", fn: layout, required: true}, // schedule blocks
+ {name: "schedule", fn: schedule, required: true}, // schedule values
+ {name: "late nilcheck", fn: nilcheckelim2},
+ {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
+ {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
+ {name: "loop rotate", fn: loopRotate},
+ {name: "stackframe", fn: stackframe, required: true},
+ {name: "trim", fn: trim}, // remove empty blocks
+}
+
+// Double-check phase ordering constraints.
+// This code is intended to document the ordering requirements
+// between different phases. It does not override the passes
+// list above.
+type constraint struct {
+ a, b string // a must come before b
+}
+
+var passOrder = [...]constraint{
+ // "insert resched checks" uses mem, better to clean out stores first.
+ {"dse", "insert resched checks"},
+ // insert resched checks adds new blocks containing generic instructions
+ {"insert resched checks", "lower"},
+ {"insert resched checks", "tighten"},
+
+ // prove relies on common-subexpression elimination for maximum benefits.
+ {"generic cse", "prove"},
+ // deadcode after prove to eliminate all new dead blocks.
+ {"prove", "generic deadcode"},
+ // common-subexpression before dead-store elim, so that we recognize
+ // when two address expressions are the same.
+ {"generic cse", "dse"},
+ // cse substantially improves nilcheckelim efficacy
+ {"generic cse", "nilcheckelim"},
+ // allow deadcode to clean up after nilcheckelim
+ {"nilcheckelim", "generic deadcode"},
+ // nilcheckelim generates sequences of plain basic blocks
+ {"nilcheckelim", "late fuse"},
+ // nilcheckelim relies on opt to rewrite user nil checks
+ {"opt", "nilcheckelim"},
+ // tighten will be most effective when as many values have been removed as possible
+ {"generic deadcode", "tighten"},
+ {"generic cse", "tighten"},
+ // checkbce needs the values removed
+ {"generic deadcode", "check bce"},
+ // don't run optimization pass until we've decomposed builtin objects
+ {"decompose builtin", "late opt"},
+ // decompose builtin is the last pass that may introduce new float ops, so run softfloat after it
+ {"decompose builtin", "softfloat"},
+ // tuple selectors must be tightened to generators and de-duplicated before scheduling
+ {"tighten tuple selectors", "schedule"},
+ // remove critical edges before phi tighten, so that phi args get better placement
+ {"critical", "phi tighten"},
+ // don't layout blocks until critical edges have been removed
+ {"critical", "layout"},
+ // regalloc requires the removal of all critical edges
+ {"critical", "regalloc"},
+ // regalloc requires all the values in a block to be scheduled
+ {"schedule", "regalloc"},
+ // checkLower must run after lowering & subsequent dead code elim
+ {"lower", "checkLower"},
+ {"lowered deadcode", "checkLower"},
+ // late nilcheck needs instructions to be scheduled.
+ {"schedule", "late nilcheck"},
+ // flagalloc needs instructions to be scheduled.
+ {"schedule", "flagalloc"},
+ // regalloc needs flags to be allocated first.
+ {"flagalloc", "regalloc"},
+ // loopRotate will confuse regalloc.
+ {"regalloc", "loop rotate"},
+ // stackframe needs to know about spilled registers.
+ {"regalloc", "stackframe"},
+ // trim needs regalloc to be done first.
+ {"regalloc", "trim"},
+}
+
+func init() {
+ for _, c := range passOrder {
+ a, b := c.a, c.b
+ i := -1
+ j := -1
+ for k, p := range passes {
+ if p.name == a {
+ i = k
+ }
+ if p.name == b {
+ j = k
+ }
+ }
+ if i < 0 {
+ log.Panicf("pass %s not found", a)
+ }
+ if j < 0 {
+ log.Panicf("pass %s not found", b)
+ }
+ if i >= j {
+ log.Panicf("passes %s and %s out of order", a, b)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
new file mode 100644
index 0000000..0fe0337
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -0,0 +1,390 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// A Config holds readonly compilation information.
+// It is created once, early during compilation,
+// and shared across the compilation of all functions.
+type Config struct {
+ arch string // "amd64", etc.
+ PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize
+ RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize
+ Types Types
+ lowerBlock blockRewriter // lowering function
+ lowerValue valueRewriter // lowering function
+ splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures
+ registers []Register // machine registers
+ gpRegMask regMask // general purpose integer register mask
+ fpRegMask regMask // floating point register mask
+ fp32RegMask regMask // floating point register mask
+ fp64RegMask regMask // floating point register mask
+ specialRegMask regMask // special register mask
+ GCRegMap []*Register // garbage collector register map, by GC register index
+ FPReg int8 // register number of frame pointer, -1 if not used
+ LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
+ hasGReg bool // has hardware g register
+ ctxt *obj.Link // Generic arch information
+ optimize bool // Do optimization
+ noDuffDevice bool // Don't use Duff's device
+ useSSE bool // Use SSE for non-float operations
+ useAvg bool // Use optimizations that need Avg* operations
+ useHmul bool // Use optimizations that need Hmul* operations
+ SoftFloat bool //
+ Race bool // race detector enabled
+ NeedsFpScratch bool // No direct move between GP and FP register sets
+ BigEndian bool //
+ UseFMA bool // Use hardware FMA operation
+}
+
+type (
+ blockRewriter func(*Block) bool
+ valueRewriter func(*Value) bool
+)
+
+type Types struct {
+ Bool *types.Type
+ Int8 *types.Type
+ Int16 *types.Type
+ Int32 *types.Type
+ Int64 *types.Type
+ UInt8 *types.Type
+ UInt16 *types.Type
+ UInt32 *types.Type
+ UInt64 *types.Type
+ Int *types.Type
+ Float32 *types.Type
+ Float64 *types.Type
+ UInt *types.Type
+ Uintptr *types.Type
+ String *types.Type
+ BytePtr *types.Type // TODO: use unsafe.Pointer instead?
+ Int32Ptr *types.Type
+ UInt32Ptr *types.Type
+ IntPtr *types.Type
+ UintptrPtr *types.Type
+ Float32Ptr *types.Type
+ Float64Ptr *types.Type
+ BytePtrPtr *types.Type
+}
+
+// NewTypes creates and populates a Types.
+func NewTypes() *Types {
+ t := new(Types)
+ t.SetTypPtrs()
+ return t
+}
+
+// SetTypPtrs populates t.
+func (t *Types) SetTypPtrs() {
+ t.Bool = types.Types[types.TBOOL]
+ t.Int8 = types.Types[types.TINT8]
+ t.Int16 = types.Types[types.TINT16]
+ t.Int32 = types.Types[types.TINT32]
+ t.Int64 = types.Types[types.TINT64]
+ t.UInt8 = types.Types[types.TUINT8]
+ t.UInt16 = types.Types[types.TUINT16]
+ t.UInt32 = types.Types[types.TUINT32]
+ t.UInt64 = types.Types[types.TUINT64]
+ t.Int = types.Types[types.TINT]
+ t.Float32 = types.Types[types.TFLOAT32]
+ t.Float64 = types.Types[types.TFLOAT64]
+ t.UInt = types.Types[types.TUINT]
+ t.Uintptr = types.Types[types.TUINTPTR]
+ t.String = types.Types[types.TSTRING]
+ t.BytePtr = types.NewPtr(types.Types[types.TUINT8])
+ t.Int32Ptr = types.NewPtr(types.Types[types.TINT32])
+ t.UInt32Ptr = types.NewPtr(types.Types[types.TUINT32])
+ t.IntPtr = types.NewPtr(types.Types[types.TINT])
+ t.UintptrPtr = types.NewPtr(types.Types[types.TUINTPTR])
+ t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32])
+ t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64])
+ t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))
+}
+
+type Logger interface {
+ // Logf logs a message from the compiler.
+ Logf(string, ...interface{})
+
+ // Log reports whether logging is not a no-op;
+ // some logging calls account for more than a few heap allocations.
+ Log() bool
+
+ // Fatalf reports a compiler error and exits.
+ Fatalf(pos src.XPos, msg string, args ...interface{})
+
+ // Warnl writes compiler messages in the form expected by "errorcheck" tests
+ Warnl(pos src.XPos, fmt_ string, args ...interface{})
+
+ // Forwards the Debug flags from gc
+ Debug_checknil() bool
+}
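As a rough illustration of what the interface asks of a frontend, here is a minimal, hypothetical implementation that forwards everything to the standard log package; it is a sketch, not part of this patch.

    package ssa

    import (
        "cmd/internal/src"
        "log"
    )

    // stdLogger is a made-up example type; only its method set matters here.
    type stdLogger struct{ checkNil bool }

    func (l stdLogger) Logf(format string, args ...interface{}) { log.Printf(format, args...) }
    func (l stdLogger) Log() bool                               { return true }
    func (l stdLogger) Fatalf(pos src.XPos, msg string, args ...interface{}) {
        log.Fatalf(msg, args...)
    }
    func (l stdLogger) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
        log.Printf(fmt_, args...)
    }
    func (l stdLogger) Debug_checknil() bool { return l.checkNil }

    var _ Logger = stdLogger{} // compile-time check that the interface is satisfied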
+
+type Frontend interface {
+ CanSSA(t *types.Type) bool
+
+ Logger
+
+ // StringData returns a symbol pointing to the given string's contents.
+ StringData(string) *obj.LSym
+
+ // Auto returns a Node for an auto variable of the given type.
+ // The SSA compiler uses this function to allocate space for spills.
+ Auto(src.XPos, *types.Type) GCNode
+
+ // Given the name for a compound type, returns the name we should use
+ // for the parts of that compound type.
+ SplitString(LocalSlot) (LocalSlot, LocalSlot)
+ SplitInterface(LocalSlot) (LocalSlot, LocalSlot)
+ SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
+ SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
+ SplitStruct(LocalSlot, int) LocalSlot
+ SplitArray(LocalSlot) LocalSlot // array must be length 1
+ SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
+ SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
+
+ // DerefItab dereferences an itab function
+ // entry, given the symbol of the itab and
+ // the byte offset of the function pointer.
+ // It may return nil.
+ DerefItab(sym *obj.LSym, offset int64) *obj.LSym
+
+ // Line returns a string describing the given position.
+ Line(src.XPos) string
+
+ // AllocFrame assigns frame offsets to all live auto variables.
+ AllocFrame(f *Func)
+
+ // Syslook returns a symbol of the runtime function/variable with the
+ // given name.
+ Syslook(string) *obj.LSym
+
+ // UseWriteBarrier reports whether write barrier is enabled
+ UseWriteBarrier() bool
+
+ // SetWBPos indicates that a write barrier has been inserted
+ // in this function at position pos.
+ SetWBPos(pos src.XPos)
+
+ // MyImportPath provides the import name (roughly, the package) for the function being compiled.
+ MyImportPath() string
+}
+
+// GCNode is the interface used to hold a *gc.Node (a stack variable).
+// We'd use *gc.Node directly, but that would lead to an import cycle.
+type GCNode interface {
+ Typ() *types.Type
+ String() string
+ IsSynthetic() bool
+ IsAutoTmp() bool
+ StorageClass() StorageClass
+}
+
+type StorageClass uint8
+
+const (
+ ClassAuto StorageClass = iota // local stack variable
+ ClassParam // argument
+ ClassParamOut // return value
+)
+
+const go116lateCallExpansion = true
+
+// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
+// within compilation of a function/method.
+func LateCallExpansionEnabledWithin(f *Func) bool {
+ return go116lateCallExpansion
+}
+
+// NewConfig returns a new configuration object for the given architecture.
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
+ c := &Config{arch: arch, Types: types}
+ c.useAvg = true
+ c.useHmul = true
+ switch arch {
+ case "amd64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockAMD64
+ c.lowerValue = rewriteValueAMD64
+ c.splitLoad = rewriteValueAMD64splitload
+ c.registers = registersAMD64[:]
+ c.gpRegMask = gpRegMaskAMD64
+ c.fpRegMask = fpRegMaskAMD64
+ c.FPReg = framepointerRegAMD64
+ c.LinkReg = linkRegAMD64
+ c.hasGReg = false
+ case "386":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlock386
+ c.lowerValue = rewriteValue386
+ c.splitLoad = rewriteValue386splitload
+ c.registers = registers386[:]
+ c.gpRegMask = gpRegMask386
+ c.fpRegMask = fpRegMask386
+ c.FPReg = framepointerReg386
+ c.LinkReg = linkReg386
+ c.hasGReg = false
+ case "arm":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockARM
+ c.lowerValue = rewriteValueARM
+ c.registers = registersARM[:]
+ c.gpRegMask = gpRegMaskARM
+ c.fpRegMask = fpRegMaskARM
+ c.FPReg = framepointerRegARM
+ c.LinkReg = linkRegARM
+ c.hasGReg = true
+ case "arm64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockARM64
+ c.lowerValue = rewriteValueARM64
+ c.registers = registersARM64[:]
+ c.gpRegMask = gpRegMaskARM64
+ c.fpRegMask = fpRegMaskARM64
+ c.FPReg = framepointerRegARM64
+ c.LinkReg = linkRegARM64
+ c.hasGReg = true
+ c.noDuffDevice = objabi.GOOS == "darwin" || objabi.GOOS == "ios" // darwin linker cannot handle BR26 reloc with non-zero addend
+ case "ppc64":
+ c.BigEndian = true
+ fallthrough
+ case "ppc64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockPPC64
+ c.lowerValue = rewriteValuePPC64
+ c.registers = registersPPC64[:]
+ c.gpRegMask = gpRegMaskPPC64
+ c.fpRegMask = fpRegMaskPPC64
+ c.FPReg = framepointerRegPPC64
+ c.LinkReg = linkRegPPC64
+ c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
+ c.hasGReg = true
+ case "mips64":
+ c.BigEndian = true
+ fallthrough
+ case "mips64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockMIPS64
+ c.lowerValue = rewriteValueMIPS64
+ c.registers = registersMIPS64[:]
+ c.gpRegMask = gpRegMaskMIPS64
+ c.fpRegMask = fpRegMaskMIPS64
+ c.specialRegMask = specialRegMaskMIPS64
+ c.FPReg = framepointerRegMIPS64
+ c.LinkReg = linkRegMIPS64
+ c.hasGReg = true
+ case "s390x":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockS390X
+ c.lowerValue = rewriteValueS390X
+ c.registers = registersS390X[:]
+ c.gpRegMask = gpRegMaskS390X
+ c.fpRegMask = fpRegMaskS390X
+ c.FPReg = framepointerRegS390X
+ c.LinkReg = linkRegS390X
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.BigEndian = true
+ case "mips":
+ c.BigEndian = true
+ fallthrough
+ case "mipsle":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockMIPS
+ c.lowerValue = rewriteValueMIPS
+ c.registers = registersMIPS[:]
+ c.gpRegMask = gpRegMaskMIPS
+ c.fpRegMask = fpRegMaskMIPS
+ c.specialRegMask = specialRegMaskMIPS
+ c.FPReg = framepointerRegMIPS
+ c.LinkReg = linkRegMIPS
+ c.hasGReg = true
+ c.noDuffDevice = true
+ case "riscv64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockRISCV64
+ c.lowerValue = rewriteValueRISCV64
+ c.registers = registersRISCV64[:]
+ c.gpRegMask = gpRegMaskRISCV64
+ c.fpRegMask = fpRegMaskRISCV64
+ c.FPReg = framepointerRegRISCV64
+ c.hasGReg = true
+ case "wasm":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockWasm
+ c.lowerValue = rewriteValueWasm
+ c.registers = registersWasm[:]
+ c.gpRegMask = gpRegMaskWasm
+ c.fpRegMask = fpRegMaskWasm
+ c.fp32RegMask = fp32RegMaskWasm
+ c.fp64RegMask = fp64RegMaskWasm
+ c.FPReg = framepointerRegWasm
+ c.LinkReg = linkRegWasm
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.useAvg = false
+ c.useHmul = false
+ default:
+ ctxt.Diag("arch %s not implemented", arch)
+ }
+ c.ctxt = ctxt
+ c.optimize = optimize
+ c.useSSE = true
+ c.UseFMA = true
+
+ // On Plan 9, floating point operations are not allowed in note handler.
+ if objabi.GOOS == "plan9" {
+ // Don't use FMA on Plan 9
+ c.UseFMA = false
+
+ // Don't use Duff's device and SSE on Plan 9 AMD64.
+ if arch == "amd64" {
+ c.noDuffDevice = true
+ c.useSSE = false
+ }
+ }
+
+ if ctxt.Flag_shared {
+ // LoweredWB is secretly a CALL and CALLs on 386 in
+ // shared mode get rewritten by obj6.go to go through
+ // the GOT, which clobbers BX.
+ opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 3 // BX
+ }
+
+ // Create the GC register map index.
+ // TODO: This is only used for debug printing. Maybe export config.registers?
+ gcRegMapSize := int16(0)
+ for _, r := range c.registers {
+ if r.gcNum+1 > gcRegMapSize {
+ gcRegMapSize = r.gcNum + 1
+ }
+ }
+ c.GCRegMap = make([]*Register, gcRegMapSize)
+ for i, r := range c.registers {
+ if r.gcNum != -1 {
+ c.GCRegMap[r.gcNum] = &c.registers[i]
+ }
+ }
+
+ return c
+}
+
+func (c *Config) Ctxt() *obj.Link { return c.ctxt }
diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go
new file mode 100644
index 0000000..5954d3b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim.go
@@ -0,0 +1,84 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// copyelim removes all uses of OpCopy values from f.
+// A subsequent deadcode pass is needed to actually remove the copies.
+func copyelim(f *Func) {
+ // Modify all values so no arg (including args
+ // of OpCopy) is a copy.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ }
+ }
+
+ // Update block control values.
+ for _, b := range f.Blocks {
+ for i, v := range b.ControlValues() {
+ if v.Op == OpCopy {
+ b.ReplaceControl(i, v.Args[0])
+ }
+ }
+ }
+
+ // Update named values.
+ for _, name := range f.Names {
+ values := f.NamedValues[name]
+ for i, v := range values {
+ if v.Op == OpCopy {
+ values[i] = v.Args[0]
+ }
+ }
+ }
+}
+
+// copySource returns the (non-copy) op which is the
+// ultimate source of v. v must be a copy op.
+func copySource(v *Value) *Value {
+ w := v.Args[0]
+
+ // This loop is just:
+ // for w.Op == OpCopy {
+ // w = w.Args[0]
+ // }
+ // but we take some extra care to make sure we
+ // don't get stuck in an infinite loop.
+ // Infinite copy loops may happen in unreachable code.
+ // (TODO: or can they? Needs a test.)
+ slow := w
+ var advance bool
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ if w == slow {
+ w.reset(OpUnknown)
+ break
+ }
+ if advance {
+ slow = slow.Args[0]
+ }
+ advance = !advance
+ }
+
+ // The answer is w. Update all the copies we saw
+ // to point directly to w. Doing this update makes
+ // sure that we don't end up doing O(n^2) work
+ // for a chain of n copies.
+ for v != w {
+ x := v.Args[0]
+ v.SetArg(0, w)
+ v = x
+ }
+ return w
+}
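The slow/advance pair above is the classic tortoise-and-hare guard: the fast pointer moves every step, the slow one every other step, so on a cycle they must eventually meet. A standalone sketch of the same idea on a plain linked list (names invented, not part of this patch):

    package main

    import "fmt"

    type node struct{ next *node }

    // findEnd follows next pointers the way copySource follows OpCopy args,
    // stopping at the end of the chain or when a cycle is detected.
    func findEnd(v *node) (*node, bool) {
        w := v.next
        slow := w
        advance := false
        for w.next != nil {
            w = w.next
            if w == slow {
                return w, true // cycle detected
            }
            if advance {
                slow = slow.next
            }
            advance = !advance
        }
        return w, false
    }

    func main() {
        // A three-node chain ending in a terminal node.
        end := &node{}
        chain := &node{next: &node{next: end}}
        n, cyclic := findEnd(chain)
        fmt.Println(n == end, cyclic) // true false

        // A two-node cycle is detected instead of looping forever.
        a := &node{}
        b := &node{next: a}
        a.next = b
        _, cyclic = findEnd(&node{next: a})
        fmt.Println(cyclic) // true
    }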
+
+// copyelimValue ensures that no args of v are copies.
+func copyelimValue(v *Value) {
+ for i, a := range v.Args {
+ if a.Op == OpCopy {
+ v.SetArg(i, copySource(a))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/copyelim_test.go b/src/cmd/compile/internal/ssa/copyelim_test.go
new file mode 100644
index 0000000..fe31b12
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim_test.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+func BenchmarkCopyElim1(b *testing.B) { benchmarkCopyElim(b, 1) }
+func BenchmarkCopyElim10(b *testing.B) { benchmarkCopyElim(b, 10) }
+func BenchmarkCopyElim100(b *testing.B) { benchmarkCopyElim(b, 100) }
+func BenchmarkCopyElim1000(b *testing.B) { benchmarkCopyElim(b, 1000) }
+func BenchmarkCopyElim10000(b *testing.B) { benchmarkCopyElim(b, 10000) }
+func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) }
+
+func benchmarkCopyElim(b *testing.B, n int) {
+ c := testConfig(b)
+
+ values := make([]interface{}, 0, n+2)
+ values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil))
+ last := "mem"
+ for i := 0; i < n; i++ {
+ name := fmt.Sprintf("copy%d", i)
+ values = append(values, Valu(name, OpCopy, types.TypeMem, 0, nil, last))
+ last = name
+ }
+ values = append(values, Exit(last))
+ // Reverse values array to make it hard
+ for i := 0; i < len(values)/2; i++ {
+ values[i], values[len(values)-1-i] = values[len(values)-1-i], values[i]
+ }
+
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", Bloc("entry", values...))
+ Copyelim(fun.f)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/critical.go b/src/cmd/compile/internal/ssa/critical.go
new file mode 100644
index 0000000..b85721e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/critical.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// critical splits critical edges (those that go from a block with
+// more than one outedge to a block with more than one inedge).
+// Regalloc wants a critical-edge-free CFG so it can implement phi values.
+func critical(f *Func) {
+ // maps from phi arg ID to the new block created for that argument
+ blocks := make([]*Block, f.NumValues())
+ // need to iterate over f.Blocks without range, as we might
+ // need to split critical edges on newly constructed blocks
+ for j := 0; j < len(f.Blocks); j++ {
+ b := f.Blocks[j]
+ if len(b.Preds) <= 1 {
+ continue
+ }
+
+ var phi *Value
+ // Determine whether we have only a single phi in this
+ // block; that is easier to handle than the general
+ // case of a block with multiple phi values.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if phi != nil {
+ phi = nil
+ break
+ }
+ phi = v
+ }
+ }
+
+ // reset our block map
+ if phi != nil {
+ for _, v := range phi.Args {
+ blocks[v.ID] = nil
+ }
+ }
+
+ // split input edges coming from multi-output blocks.
+ for i := 0; i < len(b.Preds); {
+ e := b.Preds[i]
+ p := e.b
+ pi := e.i
+ if p.Kind == BlockPlain {
+ i++
+ continue // only single output block
+ }
+
+ var d *Block // new block used to remove critical edge
+ reusedBlock := false // if true, then this is not the first use of this block
+ if phi != nil {
+ argID := phi.Args[i].ID
+ // find or record the block that we used to split
+ // critical edges for this argument
+ if d = blocks[argID]; d == nil {
+ // Splitting doesn't necessarily remove the critical edge;
+ // because we iterate up to len(f.Blocks) above, the newly
+ // created blocks are re-examined on later iterations.
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ blocks[argID] = d
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ } else {
+ reusedBlock = true
+ }
+ } else {
+ // no existing block, so allocate a new block
+ // to place on the edge
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ }
+
+ // If this is not the first argument for the
+ // block, then we need to remove the
+ // corresponding elements from the block's
+ // predecessors and phi args.
+ if reusedBlock {
+ // Add p->d edge
+ p.Succs[pi] = Edge{d, len(d.Preds)}
+ d.Preds = append(d.Preds, Edge{p, pi})
+
+ // Remove p as a predecessor from b.
+ b.removePred(i)
+
+ // Update corresponding phi args
+ n := len(b.Preds)
+ phi.Args[i].Uses--
+ phi.Args[i] = phi.Args[n]
+ phi.Args[n] = nil
+ phi.Args = phi.Args[:n]
+ // splitting occasionally leads to a phi having
+ // a single argument (occurs with -N)
+ if n == 1 {
+ phi.Op = OpCopy
+ }
+ // Don't increment i in this case because we moved
+ // an unprocessed predecessor down into slot i.
+ } else {
+ // splice it in
+ p.Succs[pi] = Edge{d, 0}
+ b.Preds[i] = Edge{d, 0}
+ d.Preds = append(d.Preds, Edge{p, pi})
+ d.Succs = append(d.Succs, Edge{b, i})
+ i++
+ }
+ }
+ }
+}
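For readers less familiar with the terminology, here is a self-contained sketch of what splitting a critical edge means on a tiny CFG encoded as adjacency maps; the block numbering is made up and the file is not part of this patch.

    package main

    import "fmt"

    func main() {
        // Block 0 branches to 1, 2 and 3; blocks 1 and 2 fall through to 3.
        // Edge 0->3 is critical: 0 has several successors and 3 has several
        // predecessors.
        succs := map[int][]int{0: {1, 2, 3}, 1: {3}, 2: {3}, 3: {}}
        preds := map[int][]int{0: {}, 1: {0}, 2: {0}, 3: {0, 1, 2}}

        next := 4 // next fresh block id
        for p := 0; p < 4; p++ {
            if len(succs[p]) <= 1 {
                continue
            }
            for i, b := range succs[p] {
                if len(preds[b]) <= 1 {
                    continue
                }
                // p->b is critical; reroute it through a new empty block d.
                d := next
                next++
                succs[p][i] = d
                succs[d] = []int{b}
                preds[d] = []int{p}
                for j, q := range preds[b] {
                    if q == p {
                        preds[b][j] = d
                    }
                }
                fmt.Printf("split critical edge %d->%d via new block %d\n", p, b, d)
            }
        }
    }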
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
new file mode 100644
index 0000000..3b4f2be
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -0,0 +1,373 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+// cse does common-subexpression elimination on the Function.
+// Values are just relinked, nothing is deleted. A subsequent deadcode
+// pass is required to actually remove duplicate expressions.
+func cse(f *Func) {
+ // Two values are equivalent if they satisfy the following definition:
+ // equivalent(v, w):
+ // v.op == w.op
+ // v.type == w.type
+ // v.aux == w.aux
+ // v.auxint == w.auxint
+ // len(v.args) == len(w.args)
+ // v.block == w.block if v.op == OpPhi
+ // equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1
+
+ // The algorithm searches for a partition of f's values into
+ // equivalence classes using the above definition.
+ // It starts with a coarse partition and iteratively refines it
+ // until it reaches a fixed point.
+
+ // Make initial coarse partitions by using a subset of the conditions above.
+ a := make([]*Value, 0, f.NumValues())
+ if f.auxmap == nil {
+ f.auxmap = auxmap{}
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsMemory() {
+ continue // memory values can never cse
+ }
+ if f.auxmap[v.Aux] == 0 {
+ f.auxmap[v.Aux] = int32(len(f.auxmap)) + 1
+ }
+ a = append(a, v)
+ }
+ }
+ partition := partitionValues(a, f.auxmap)
+
+ // map from value id back to eqclass id
+ valueEqClass := make([]ID, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // Use negative equivalence class #s for unique values.
+ valueEqClass[v.ID] = -v.ID
+ }
+ }
+ var pNum ID = 1
+ for _, e := range partition {
+ if f.pass.debug > 1 && len(e) > 500 {
+ fmt.Printf("CSE.large partition (%d): ", len(e))
+ for j := 0; j < 3; j++ {
+ fmt.Printf("%s ", e[j].LongString())
+ }
+ fmt.Println()
+ }
+
+ for _, v := range e {
+ valueEqClass[v.ID] = pNum
+ }
+ if f.pass.debug > 2 && len(e) > 1 {
+ fmt.Printf("CSE.partition #%d:", pNum)
+ for _, v := range e {
+ fmt.Printf(" %s", v.String())
+ }
+ fmt.Printf("\n")
+ }
+ pNum++
+ }
+
+ // Split equivalence classes at points where they have
+ // non-equivalent arguments. Repeat until we can't find any
+ // more splits.
+ var splitPoints []int
+ byArgClass := new(partitionByArgClass) // reusable partitionByArgClass to reduce allocations
+ for {
+ changed := false
+
+ // partition can grow in the loop. By not using a range loop here,
+ // we process new additions as they arrive, avoiding O(n^2) behavior.
+ for i := 0; i < len(partition); i++ {
+ e := partition[i]
+
+ if opcodeTable[e[0].Op].commutative {
+ // Order the first two args before comparison.
+ for _, v := range e {
+ if valueEqClass[v.Args[0].ID] > valueEqClass[v.Args[1].ID] {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ }
+ }
+
+ // Sort by eq class of arguments.
+ byArgClass.a = e
+ byArgClass.eqClass = valueEqClass
+ sort.Sort(byArgClass)
+
+ // Find split points.
+ splitPoints = append(splitPoints[:0], 0)
+ for j := 1; j < len(e); j++ {
+ v, w := e[j-1], e[j]
+ // Note: commutative args already correctly ordered by byArgClass.
+ eqArgs := true
+ for k, a := range v.Args {
+ b := w.Args[k]
+ if valueEqClass[a.ID] != valueEqClass[b.ID] {
+ eqArgs = false
+ break
+ }
+ }
+ if !eqArgs {
+ splitPoints = append(splitPoints, j)
+ }
+ }
+ if len(splitPoints) == 1 {
+ continue // no splits, leave equivalence class alone.
+ }
+
+ // Move another equivalence class down in place of e.
+ partition[i] = partition[len(partition)-1]
+ partition = partition[:len(partition)-1]
+ i--
+
+ // Add new equivalence classes for the parts of e we found.
+ splitPoints = append(splitPoints, len(e))
+ for j := 0; j < len(splitPoints)-1; j++ {
+ f := e[splitPoints[j]:splitPoints[j+1]]
+ if len(f) == 1 {
+ // Don't add singletons.
+ valueEqClass[f[0].ID] = -f[0].ID
+ continue
+ }
+ for _, v := range f {
+ valueEqClass[v.ID] = pNum
+ }
+ pNum++
+ partition = append(partition, f)
+ }
+ changed = true
+ }
+
+ if !changed {
+ break
+ }
+ }
+
+ sdom := f.Sdom()
+
+ // Compute substitutions we would like to do. We substitute v for w
+ // if v and w are in the same equivalence class and v dominates w.
+ rewrite := make([]*Value, f.NumValues())
+ byDom := new(partitionByDom) // reusable partitionByDom to reduce allocs
+ for _, e := range partition {
+ byDom.a = e
+ byDom.sdom = sdom
+ sort.Sort(byDom)
+ for i := 0; i < len(e)-1; i++ {
+ // e is sorted by domorder, so a maximal dominant element is first in the slice
+ v := e[i]
+ if v == nil {
+ continue
+ }
+
+ e[i] = nil
+ // Replace all elements of e which v dominates
+ for j := i + 1; j < len(e); j++ {
+ w := e[j]
+ if w == nil {
+ continue
+ }
+ if sdom.IsAncestorEq(v.Block, w.Block) {
+ rewrite[w.ID] = v
+ e[j] = nil
+ } else {
+ // e is sorted by domorder, so v.Block doesn't dominate any subsequent blocks in e
+ break
+ }
+ }
+ }
+ }
+
+ rewrites := int64(0)
+
+ // Apply substitutions
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, w := range v.Args {
+ if x := rewrite[w.ID]; x != nil {
+ if w.Pos.IsStmt() == src.PosIsStmt {
+ // We're about to lose the statement marker on w.
+ // w is an input to v; if they're in the same block
+ // and the same line, v is a good-enough new statement boundary.
+ if w.Block == v.Block && w.Pos.Line() == v.Pos.Line() {
+ v.Pos = v.Pos.WithIsStmt()
+ w.Pos = w.Pos.WithNotStmt()
+ } // TODO and if this fails?
+ }
+ v.SetArg(i, x)
+ rewrites++
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if x := rewrite[v.ID]; x != nil {
+ if v.Op == OpNilCheck {
+ // nilcheck pass will remove the nil checks and log
+ // them appropriately, so don't mess with them here.
+ continue
+ }
+ b.ReplaceControl(i, x)
+ }
+ }
+ }
+
+ if f.pass.stats > 0 {
+ f.LogStat("CSE REWRITES", rewrites)
+ }
+}
+
+// An eqclass approximates an equivalence class. During the
+// algorithm it may represent the union of several of the
+// final equivalence classes.
+type eqclass []*Value
+
+// partitionValues partitions the values into equivalence classes
+// based on having all the following features match:
+// - opcode
+// - type
+// - auxint
+// - aux
+// - nargs
+// - block # if a phi op
+// - first two arg's opcodes and auxint
+// - NOT first two arg's aux; that can break CSE.
+// partitionValues returns a list of equivalence classes, each
+// being a list of *Values sorted by ID. The eqclass slices are
+// backed by the same storage as the input slice.
+// Equivalence classes of size 1 are ignored.
+func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
+ sort.Sort(sortvalues{a, auxIDs})
+
+ var partition []eqclass
+ for len(a) > 0 {
+ v := a[0]
+ j := 1
+ for ; j < len(a); j++ {
+ w := a[j]
+ if cmpVal(v, w, auxIDs) != types.CMPeq {
+ break
+ }
+ }
+ if j > 1 {
+ partition = append(partition, a[:j])
+ }
+ a = a[j:]
+ }
+
+ return partition
+}
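The same sort-then-slice-into-runs pattern, shown standalone on plain ints; a sketch, not part of this patch.

    package main

    import (
        "fmt"
        "sort"
    )

    // partitionInts slices a into runs of equal elements, dropping singletons,
    // just as partitionValues does for *Values under cmpVal.
    func partitionInts(a []int) [][]int {
        sort.Ints(a)
        var partition [][]int
        for len(a) > 0 {
            j := 1
            for ; j < len(a); j++ {
                if a[j] != a[0] {
                    break
                }
            }
            if j > 1 {
                partition = append(partition, a[:j]) // shares storage with a
            }
            a = a[j:]
        }
        return partition
    }

    func main() {
        fmt.Println(partitionInts([]int{3, 1, 2, 3, 1, 4})) // [[1 1] [3 3]]
    }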
+func lt2Cmp(isLt bool) types.Cmp {
+ if isLt {
+ return types.CMPlt
+ }
+ return types.CMPgt
+}
+
+type auxmap map[interface{}]int32
+
+func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
+ // Try to order these comparisons by cost (cheaper first)
+ if v.Op != w.Op {
+ return lt2Cmp(v.Op < w.Op)
+ }
+ if v.AuxInt != w.AuxInt {
+ return lt2Cmp(v.AuxInt < w.AuxInt)
+ }
+ if len(v.Args) != len(w.Args) {
+ return lt2Cmp(len(v.Args) < len(w.Args))
+ }
+ if v.Op == OpPhi && v.Block != w.Block {
+ return lt2Cmp(v.Block.ID < w.Block.ID)
+ }
+ if v.Type.IsMemory() {
+ // We will never be able to CSE two values
+ // that generate memory.
+ return lt2Cmp(v.ID < w.ID)
+ }
+ // OpSelect is a pseudo-op. We need to be more aggressive
+ // regarding CSE to keep multiple OpSelects of the same
+ // argument from existing.
+ if v.Op != OpSelect0 && v.Op != OpSelect1 {
+ if tc := v.Type.Compare(w.Type); tc != types.CMPeq {
+ return tc
+ }
+ }
+
+ if v.Aux != w.Aux {
+ if v.Aux == nil {
+ return types.CMPlt
+ }
+ if w.Aux == nil {
+ return types.CMPgt
+ }
+ return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux])
+ }
+
+ return types.CMPeq
+}
+
+// Sort values to make the initial partition.
+type sortvalues struct {
+ a []*Value // array of values
+ auxIDs auxmap // aux -> aux ID map
+}
+
+func (sv sortvalues) Len() int { return len(sv.a) }
+func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv sortvalues) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ if cmp := cmpVal(v, w, sv.auxIDs); cmp != types.CMPeq {
+ return cmp == types.CMPlt
+ }
+
+ // Sort by value ID last to keep the sort result deterministic.
+ return v.ID < w.ID
+}
+
+type partitionByDom struct {
+ a []*Value // array of values
+ sdom SparseTree
+}
+
+func (sv partitionByDom) Len() int { return len(sv.a) }
+func (sv partitionByDom) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByDom) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ return sv.sdom.domorder(v.Block) < sv.sdom.domorder(w.Block)
+}
+
+type partitionByArgClass struct {
+ a []*Value // array of values
+ eqClass []ID // equivalence class IDs of values
+}
+
+func (sv partitionByArgClass) Len() int { return len(sv.a) }
+func (sv partitionByArgClass) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByArgClass) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ for i, a := range v.Args {
+ b := w.Args[i]
+ if sv.eqClass[a.ID] < sv.eqClass[b.ID] {
+ return true
+ }
+ if sv.eqClass[a.ID] > sv.eqClass[b.ID] {
+ return false
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
new file mode 100644
index 0000000..9e76645
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -0,0 +1,129 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "testing"
+)
+
+type tstAux struct {
+ s string
+}
+
+// This tests for a bug found when partitioning, but not sorting by the Aux value.
+func TestCSEAuxPartitionBug(t *testing.T) {
+ c := testConfig(t)
+ arg1Aux := &tstAux{"arg1-aux"}
+ arg2Aux := &tstAux{"arg2-aux"}
+ arg3Aux := &tstAux{"arg3-aux"}
+ a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8)
+
+ // construct lots of values with args that have aux values and place
+ // them in an order that triggers the bug
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("r7", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg1"),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, arg1Aux),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, arg2Aux),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, arg3Aux),
+ Valu("r9", OpAdd64, c.config.Types.Int64, 0, nil, "r7", "r8"),
+ Valu("r4", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("r8", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg2"),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("r6", OpAdd64, c.config.Types.Int64, 0, nil, "r4", "r5"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("r5", OpAdd64, c.config.Types.Int64, 0, nil, "r2", "r3"),
+ Valu("r10", OpAdd64, c.config.Types.Int64, 0, nil, "r6", "r9"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r10", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ s1Cnt := 2
+ // r1 == r2 == r3, needs to remove two of this set
+ s2Cnt := 1
+ // r4 == r5, needs to remove one of these
+ for k, v := range fun.values {
+ if v.Op == OpInvalid {
+ switch k {
+ case "r1":
+ fallthrough
+ case "r2":
+ fallthrough
+ case "r3":
+ if s1Cnt == 0 {
+ t.Errorf("cse removed all of r1,r2,r3")
+ }
+ s1Cnt--
+
+ case "r4":
+ fallthrough
+ case "r5":
+ if s2Cnt == 0 {
+ t.Errorf("cse removed all of r4,r5")
+ }
+ s2Cnt--
+ default:
+ t.Errorf("cse removed %s, but shouldn't have", k)
+ }
+ }
+ }
+
+ if s1Cnt != 0 || s2Cnt != 0 {
+ t.Errorf("%d values missed during cse", s1Cnt+s2Cnt)
+ }
+}
+
+// TestZCSE tests the zero arg cse.
+func TestZCSE(t *testing.T) {
+ c := testConfig(t)
+ a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8)
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("sb1", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sb2", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("addr1", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb1"),
+ Valu("addr2", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb2"),
+ Valu("a1ld", OpLoad, c.config.Types.Int64, 0, nil, "addr1", "start"),
+ Valu("a2ld", OpLoad, c.config.Types.Int64, 0, nil, "addr2", "start"),
+ Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "a1ld", "c1"),
+ Valu("c2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "a2ld", "c2"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r3", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ zcse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.values["c1"].Op != OpInvalid && fun.values["c2"].Op != OpInvalid {
+ t.Errorf("zcse should have removed c1 or c2")
+ }
+ if fun.values["sb1"].Op != OpInvalid && fun.values["sb2"].Op != OpInvalid {
+ t.Errorf("zcse should have removed sb1 or sb2")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
new file mode 100644
index 0000000..96b552e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -0,0 +1,393 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+)
+
+// findlive returns the reachable blocks and live values in f.
+// The caller should call f.retDeadcodeLive(live) when it is done with it.
+func findlive(f *Func) (reachable []bool, live []bool) {
+ reachable = ReachableBlocks(f)
+ var order []*Value
+ live, order = liveValues(f, reachable)
+ f.retDeadcodeLiveOrderStmts(order)
+ return
+}
+
+// ReachableBlocks returns the reachable blocks in f.
+func ReachableBlocks(f *Func) []bool {
+ reachable := make([]bool, f.NumBlocks())
+ reachable[f.Entry.ID] = true
+ p := make([]*Block, 0, 64) // stack-like worklist
+ p = append(p, f.Entry)
+ for len(p) > 0 {
+ // Pop a reachable block
+ b := p[len(p)-1]
+ p = p[:len(p)-1]
+ // Mark successors as reachable
+ s := b.Succs
+ if b.Kind == BlockFirst {
+ s = s[:1]
+ }
+ for _, e := range s {
+ c := e.b
+ if int(c.ID) >= len(reachable) {
+ f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable))
+ }
+ if !reachable[c.ID] {
+ reachable[c.ID] = true
+ p = append(p, c) // push
+ }
+ }
+ }
+ return reachable
+}
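The worklist loop above is ordinary graph reachability. A standalone sketch on a plain adjacency list (not part of this patch); block 3 is unreachable from the entry and stays false.

    package main

    import "fmt"

    func reachableFrom(entry int, succs [][]int) []bool {
        reachable := make([]bool, len(succs))
        reachable[entry] = true
        p := []int{entry} // stack-like worklist
        for len(p) > 0 {
            b := p[len(p)-1]
            p = p[:len(p)-1]
            for _, c := range succs[b] {
                if !reachable[c] {
                    reachable[c] = true
                    p = append(p, c) // push
                }
            }
        }
        return reachable
    }

    func main() {
        succs := [][]int{0: {1, 2}, 1: {2}, 2: {0}, 3: {2}}
        fmt.Println(reachableFrom(0, succs)) // [true true true false]
    }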
+
+// liveValues returns the live values in f and a list of values that are eligible
+// to be statements in reversed data flow order.
+// The second result is used to help conserve statement boundaries for debugging.
+// reachable is a map from block ID to whether the block is reachable.
+// The caller should call f.retDeadcodeLive(live) and f.retDeadcodeLiveOrderStmts(liveOrderStmts)
+// when they are done with the return values.
+func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) {
+ live = f.newDeadcodeLive()
+ if cap(live) < f.NumValues() {
+ live = make([]bool, f.NumValues())
+ } else {
+ live = live[:f.NumValues()]
+ for i := range live {
+ live[i] = false
+ }
+ }
+
+ liveOrderStmts = f.newDeadcodeLiveOrderStmts()
+ liveOrderStmts = liveOrderStmts[:0]
+
+ // After regalloc, consider all values to be live.
+ // See the comment at the top of regalloc.go and in deadcode for details.
+ if f.RegAlloc != nil {
+ for i := range live {
+ live[i] = true
+ }
+ return
+ }
+
+ // Record all the inline indexes we need
+ var liveInlIdx map[int]bool
+ pt := f.Config.ctxt.PosTable
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ i := pt.Pos(v.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+ i := pt.Pos(b.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+
+ // Find all live values
+ q := f.Cache.deadcode.q[:0]
+ defer func() { f.Cache.deadcode.q = q }()
+
+ // Starting set: all control values of reachable blocks are live.
+ // Calls are live (because callee can observe the memory state).
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ for _, v := range b.Values {
+ if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ if v.Type.IsVoid() && !live[v.ID] {
+ // The only Void ops are nil checks and inline marks. We must keep these.
+ if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
+ // We don't need marks for bodies that
+ // have been completely optimized away.
+ // TODO: save marks only for bodies which
+ // have a faulting instruction or a call?
+ continue
+ }
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ }
+
+ // Compute transitive closure of live values.
+ for len(q) > 0 {
+ // pop a reachable value
+ v := q[len(q)-1]
+ q = q[:len(q)-1]
+ for i, x := range v.Args {
+ if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
+ continue
+ }
+ if !live[x.ID] {
+ live[x.ID] = true
+ q = append(q, x) // push
+ if x.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, x)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+// deadcode removes dead code from f.
+func deadcode(f *Func) {
+ // deadcode after regalloc is forbidden for now. Regalloc
+ // doesn't quite generate legal SSA which will lead to some
+ // required moves being eliminated. See the comment at the
+ // top of regalloc.go for details.
+ if f.RegAlloc != nil {
+ f.Fatalf("deadcode after regalloc")
+ }
+
+ // Find reachable blocks.
+ reachable := ReachableBlocks(f)
+
+ // Get rid of edges from dead to live code.
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ continue
+ }
+ for i := 0; i < len(b.Succs); {
+ e := b.Succs[i]
+ if reachable[e.b.ID] {
+ b.removeEdge(i)
+ } else {
+ i++
+ }
+ }
+ }
+
+ // Get rid of dead edges from live code.
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ if b.Kind != BlockFirst {
+ continue
+ }
+ b.removeEdge(1)
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ }
+
+ // Splice out any copies introduced during dead block removal.
+ copyelim(f)
+
+ // Find live values.
+ live, order := liveValues(f, reachable)
+ defer f.retDeadcodeLive(live)
+ defer f.retDeadcodeLiveOrderStmts(order)
+
+ // Remove dead & duplicate entries from namedValues map.
+ s := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(s)
+ i := 0
+ for _, name := range f.Names {
+ j := 0
+ s.clear()
+ values := f.NamedValues[name]
+ for _, v := range values {
+ if live[v.ID] && !s.contains(v.ID) {
+ values[j] = v
+ j++
+ s.add(v.ID)
+ }
+ }
+ if j == 0 {
+ delete(f.NamedValues, name)
+ } else {
+ f.Names[i] = name
+ i++
+ for k := len(values) - 1; k >= j; k-- {
+ values[k] = nil
+ }
+ f.NamedValues[name] = values[:j]
+ }
+ }
+ clearNames := f.Names[i:]
+ for j := range clearNames {
+ clearNames[j] = LocalSlot{}
+ }
+ f.Names = f.Names[:i]
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Unlink values and conserve statement boundaries
+ for i, b := range f.Blocks {
+ if !reachable[b.ID] {
+ // TODO what if control is statement boundary? Too late here.
+ b.ResetControls()
+ }
+ for _, v := range b.Values {
+ if !live[v.ID] {
+ v.resetArgs()
+ if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] {
+ pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line
+ }
+ }
+ }
+ }
+
+ // Find new homes for lost lines: require the earliest value in data-flow order with the same line that is also in the same block
+ for i := len(order) - 1; i >= 0; i-- {
+ w := order[i]
+ if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
+ w.Pos = w.Pos.WithIsStmt()
+ pendingLines.remove(w.Pos)
+ }
+ }
+
+ // Any boundary that failed to match a live value can move to a block end
+ pendingLines.foreachEntry(func(j int32, l uint, bi int32) {
+ b := f.Blocks[bi]
+ if b.Pos.Line() == l && b.Pos.FileIndex() == j {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ })
+
+ // Remove dead values from blocks' value list. Return dead
+ // values to the allocator.
+ for _, b := range f.Blocks {
+ i := 0
+ for _, v := range b.Values {
+ if live[v.ID] {
+ b.Values[i] = v
+ i++
+ } else {
+ f.freeValue(v)
+ }
+ }
+ b.truncateValues(i)
+ }
+
+ // Remove dead blocks from WBLoads list.
+ i = 0
+ for _, b := range f.WBLoads {
+ if reachable[b.ID] {
+ f.WBLoads[i] = b
+ i++
+ }
+ }
+ clearWBLoads := f.WBLoads[i:]
+ for j := range clearWBLoads {
+ clearWBLoads[j] = nil
+ }
+ f.WBLoads = f.WBLoads[:i]
+
+ // Remove unreachable blocks. Return dead blocks to allocator.
+ i = 0
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ f.Blocks[i] = b
+ i++
+ } else {
+ if len(b.Values) > 0 {
+ b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
+ }
+ f.freeBlock(b)
+ }
+ }
+ // zero remainder to help GC
+ tail := f.Blocks[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ f.Blocks = f.Blocks[:i]
+}
+
+// removeEdge removes the i'th outgoing edge from b (and
+// the corresponding incoming edge from b.Succs[i].b).
+func (b *Block) removeEdge(i int) {
+ e := b.Succs[i]
+ c := e.b
+ j := e.i
+
+ // Adjust b.Succs
+ b.removeSucc(i)
+
+ // Adjust c.Preds
+ c.removePred(j)
+
+ // Remove phi args from c's phis.
+ n := len(c.Preds)
+ for _, v := range c.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Args[j].Uses--
+ v.Args[j] = v.Args[n]
+ v.Args[n] = nil
+ v.Args = v.Args[:n]
+ phielimValue(v)
+ // Note: this is trickier than it looks. Replacing
+ // a Phi with a Copy can in general cause problems because
+ // Phi and Copy don't have exactly the same semantics.
+ // Phi arguments always come from a predecessor block,
+ // whereas copies don't. This matters in loops like:
+ // 1: x = (Phi y)
+ // y = (Add x 1)
+ // goto 1
+ // If we replace Phi->Copy, we get
+ // 1: x = (Copy y)
+ // y = (Add x 1)
+ // goto 1
+ // (Phi y) refers to the *previous* value of y, whereas
+ // (Copy y) refers to the *current* value of y.
+ // The modified code has a cycle and the scheduler
+ // will barf on it.
+ //
+ // Fortunately, this situation can only happen for dead
+ // code loops. We know the code we're working with is
+ // not dead, so we're ok.
+ // Proof: If we have a potential bad cycle, we have a
+ // situation like this:
+ // x = (Phi z)
+ // y = (op1 x ...)
+ // z = (op2 y ...)
+ // Where opX are not Phi ops. But such a situation
+ // implies a cycle in the dominator graph. In the
+ // example, x.Block dominates y.Block, y.Block dominates
+ // z.Block, and z.Block dominates x.Block (treating
+ // "dominates" as reflexive). Cycles in the dominator
+ // graph can only happen in an unreachable cycle.
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadcode_test.go b/src/cmd/compile/internal/ssa/deadcode_test.go
new file mode 100644
index 0000000..5777b84
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode_test.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestDeadLoop(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")),
+ // dead loop
+ Bloc("deadblock",
+ // dead value in dead block
+ Valu("deadval", OpConstBool, c.config.Types.Bool, 1, nil),
+ If("deadval", "deadblock", "exit")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["deadblock"] {
+ t.Errorf("dead block not removed")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("control value of dead block not removed")
+ }
+ }
+ }
+}
+
+func TestDeadValue(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("deadval", OpConst64, c.config.Types.Int64, 37, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("dead value not removed")
+ }
+ }
+ }
+}
+
+func TestNeverTaken(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ If("cond", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] {
+ t.Errorf("then block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+
+}
+
+func TestNestedDeadBlocks(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ If("cond", "b2", "b4")),
+ Bloc("b2",
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ If("cond", "b3", "b4")),
+ Bloc("b4",
+ If("cond", "b3", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["b2"] {
+ t.Errorf("b2 block still present")
+ }
+ if b == fun.blocks["b3"] {
+ t.Errorf("b3 block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+}
+
+func BenchmarkDeadCode(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000, 100000, 200000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+ blocks := make([]bloc, 0, n+2)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")))
+ blocks = append(blocks, Bloc("exit", Exit("mem")))
+ for i := 0; i < n; i++ {
+ blocks = append(blocks, Bloc(fmt.Sprintf("dead%d", i), Goto("exit")))
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ Deadcode(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
new file mode 100644
index 0000000..0664013
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -0,0 +1,348 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// dse does dead-store elimination on the Function.
+// Dead stores are those which are unconditionally followed by
+// another store to the same location, with no intervening load.
+// This implementation only works within a basic block. TODO: use something more global.
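+// For example, in
+//	store1 = Store {T} ptr v1 mem
+//	store2 = Store {T} ptr v2 store1
+// store1 is dead when nothing loads its memory state before store2 overwrites
+// the same location, and dse rewrites it into a Copy of mem.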
+func dse(f *Func) {
+ var stores []*Value
+ loadUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadUse)
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ shadowed := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(shadowed)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // loadUse contains stores which are used by a subsequent load.
+ // storeUse contains stores which are used by a subsequent store.
+ loadUse.clear()
+ storeUse.clear()
+ stores = stores[:0]
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // Ignore phis - they will always be first and can't be eliminated
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef && v.Op != OpVarKill {
+ // CALL, DUFFCOPY, etc. are both
+ // reads and writes.
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ } else {
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+
+ // Walk backwards looking for dead stores. Keep track of shadowed addresses.
+ // A "shadowed address" is a pointer and a size describing a memory region that
+ // is known to be written. We keep track of shadowed addresses in the shadowed
+ // map, mapping the ID of the address to the size of the shadowed region.
+ // Since we're walking backwards, writes to a shadowed region are useless,
+ // as they will be immediately overwritten.
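+ // For example, having already seen an 8-byte Store to addr while walking
+ // backwards, any earlier Store or Zero to addr that writes 8 bytes or fewer
+ // is shadowed and can be rewritten into a Copy of its memory argument.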
+ shadowed.clear()
+ v := last
+
+ walkloop:
+ if loadUse.contains(v.ID) {
+ // Someone might be reading this memory state.
+ // Clear all shadowed addresses.
+ shadowed.clear()
+ }
+ if v.Op == OpStore || v.Op == OpZero {
+ var sz int64
+ if v.Op == OpStore {
+ sz = v.Aux.(*types.Type).Size()
+ } else { // OpZero
+ sz = v.AuxInt
+ }
+ if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
+ // Modify the store/zero into a copy of the memory state,
+ // effectively eliding the store operation.
+ if v.Op == OpStore {
+ // store addr value mem
+ v.SetArgs1(v.Args[2])
+ } else {
+ // zero addr mem
+ v.SetArgs1(v.Args[1])
+ }
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ } else {
+ if sz > 0x7fffffff { // work around sparseMap's int32 value type
+ sz = 0x7fffffff
+ }
+ shadowed.set(v.Args[0].ID, int32(sz), src.NoXPos)
+ }
+ }
+ // walk to previous store
+ if v.Op == OpPhi {
+ // At start of block. Move on to next block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ // (Even if it isn't the first in the current b.Values order.)
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ v = a
+ goto walkloop
+ }
+ }
+ }
+}
+
+// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
+// we track the operations that the address of each auto reaches and if it only
+// reaches stores then we delete all the stores. The other operations will then
+// be eliminated by the dead code elimination pass.
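+// For example, if an auto's address flows only into Store/Move/Zero ops (plus
+// its VarDef/VarKill markers), those ops are rewritten into copies of their
+// memory argument and the leftover values die in the deadcode pass.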
+func elimDeadAutosGeneric(f *Func) {
+ addr := make(map[*Value]GCNode) // values that the address of the auto reaches
+ elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is
+ used := make(map[GCNode]bool) // used autos that must be kept
+
+ // visit the value and report whether any of the maps are updated
+ visit := func(v *Value) (changed bool) {
+ args := v.Args
+ switch v.Op {
+ case OpAddr, OpLocalAddr:
+ // Propagate the address if it points to an auto.
+ n, ok := v.Aux.(GCNode)
+ if !ok || n.StorageClass() != ClassAuto {
+ return
+ }
+ if addr[v] == nil {
+ addr[v] = n
+ changed = true
+ }
+ return
+ case OpVarDef, OpVarKill:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := v.Aux.(GCNode)
+ if !ok || n.StorageClass() != ClassAuto {
+ return
+ }
+ if elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ return
+ case OpVarLive:
+ // Don't delete the auto if it needs to be kept alive.
+
+ // We depend on this check to keep the autotmp stack slots
+ // for open-coded defers from being removed (since they
+ // may not be used by the inline code, but will be used by
+ // panic processing).
+ n, ok := v.Aux.(GCNode)
+ if !ok || n.StorageClass() != ClassAuto {
+ return
+ }
+ if !used[n] {
+ used[n] = true
+ changed = true
+ }
+ return
+ case OpStore, OpMove, OpZero:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := addr[args[0]]
+ if ok && elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ // Other args might hold pointers to autos.
+ args = args[1:]
+ }
+
+ // The code below assumes that we have handled all the ops
+ // with sym effects already. Sanity check that here.
+ // Ignore Args since they can't be autos.
+ if v.Op.SymEffect() != SymNone && v.Op != OpArg {
+ panic("unhandled op with sym effect")
+ }
+
+ if v.Uses == 0 && v.Op != OpNilCheck || len(args) == 0 {
+ // Nil check has no use, but we need to keep it.
+ return
+ }
+
+ // If the address of the auto reaches a memory or control
+ // operation not covered above then we probably need to keep it.
+ // We also need to keep autos if they reach Phis (issue #26153).
+ if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
+ for _, a := range args {
+ if n, ok := addr[a]; ok {
+ if !used[n] {
+ used[n] = true
+ changed = true
+ }
+ }
+ }
+ return
+ }
+
+ // Propagate any auto addresses through v.
+ node := GCNode(nil)
+ for _, a := range args {
+ if n, ok := addr[a]; ok && !used[n] {
+ if node == nil {
+ node = n
+ } else if node != n {
+ // Most of the time we only see one pointer
+ // reaching an op, but some ops can take
+ // multiple pointers (e.g. NeqPtr, Phi etc.).
+ // This is rare, so just propagate the first
+ // value to keep things simple.
+ used[n] = true
+ changed = true
+ }
+ }
+ }
+ if node == nil {
+ return
+ }
+ if addr[v] == nil {
+ // The address of an auto reaches this op.
+ addr[v] = node
+ changed = true
+ return
+ }
+ if addr[v] != node {
+ // This doesn't happen in practice, but catch it just in case.
+ used[node] = true
+ changed = true
+ }
+ return
+ }
+
+ iterations := 0
+ for {
+ if iterations == 4 {
+ // give up
+ return
+ }
+ iterations++
+ changed := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ changed = visit(v) || changed
+ }
+ // keep the auto if its address reaches a control value
+ for _, c := range b.ControlValues() {
+ if n, ok := addr[c]; ok && !used[n] {
+ used[n] = true
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for v, n := range elim {
+ if used[n] {
+ continue
+ }
+ // replace with OpCopy
+ v.SetArgs1(v.MemoryArg())
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ }
+}
+
+// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
+// to autos that are never read from.
+func elimUnreadAutos(f *Func) {
+ // Loop over all ops that affect autos taking note of which
+ // autos we need and also stores that we might be able to
+ // eliminate.
+ seen := make(map[GCNode]bool)
+ var stores []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ n, ok := v.Aux.(GCNode)
+ if !ok {
+ continue
+ }
+ if n.StorageClass() != ClassAuto {
+ continue
+ }
+
+ effect := v.Op.SymEffect()
+ switch effect {
+ case SymNone, SymWrite:
+ // If we haven't seen the auto yet
+ // then this might be a store we can
+ // eliminate.
+ if !seen[n] {
+ stores = append(stores, v)
+ }
+ default:
+ // Assume the auto is needed (loaded,
+ // has its address taken, etc.).
+ // Note we have to check the uses
+ // because dead loads haven't been
+ // eliminated yet.
+ if v.Uses > 0 {
+ seen[n] = true
+ }
+ }
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for _, store := range stores {
+ n, _ := store.Aux.(GCNode)
+ if seen[n] {
+ continue
+ }
+
+ // replace store with OpCopy
+ store.SetArgs1(store.MemoryArg())
+ store.Aux = nil
+ store.AuxInt = 0
+ store.Op = OpCopy
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
new file mode 100644
index 0000000..33cb4b9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestDeadStore(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ t.Logf("PTRTYPE %v", ptrType)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr3", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("zero1", OpZero, types.TypeMem, 1, c.config.Types.Bool, "addr3", "start"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "zero1"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Valu("store3", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store2"),
+ Valu("store4", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr3", "v", "store3"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store3")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v1 := fun.values["store1"]
+ if v1.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+
+ v2 := fun.values["zero1"]
+ if v2.Op != OpCopy {
+ t.Errorf("dead store (zero) not removed")
+ }
+}
+func TestDeadStorePhi(t *testing.T) {
+ // make sure we don't get into an infinite loop with phi values.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "store"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr", "v", "phi"),
+ If("v", "loop", "exit")),
+ Bloc("exit",
+ Exit("store")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestDeadStoreTypes(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. We test an even
+ // stronger restriction, that one store can't shadow another unless the
+ // types of the address fields are identical (where identicalness is
+ // decided by the CSE pass).
+ c := testConfig(t)
+ t1 := c.config.Types.UInt64.PtrTo()
+ t2 := c.config.Types.UInt32.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, t1, 0, nil, "sb"),
+ Valu("addr2", OpAddr, t2, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "start"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
+
+func TestDeadStoreUnsafe(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. The test above
+ // covers the case of two different types, but unsafe pointer casting
+ // can get to a point where the size is changed but type unchanged.
+ c := testConfig(t)
+ ptrType := c.config.Types.UInt64.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "addr1", "v", "start"), // store 8 bytes
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store1"), // store 1 byte
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
new file mode 100644
index 0000000..6353f72
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -0,0 +1,1187 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "encoding/hex"
+ "fmt"
+ "math/bits"
+ "sort"
+ "strings"
+)
+
+type SlotID int32
+type VarID int32
+
+// A FuncDebug contains all the debug information for the variables in a
+// function. Variables are identified by their LocalSlot, which may be the
+// result of decomposing a larger variable.
+type FuncDebug struct {
+ // Slots is all the slots used in the debug info, indexed by their SlotID.
+ Slots []LocalSlot
+ // The user variables, indexed by VarID.
+ Vars []GCNode
+ // The slots that make up each variable, indexed by VarID.
+ VarSlots [][]SlotID
+ // The location list data, indexed by VarID. Must be processed by PutLocationList.
+ LocationLists [][]byte
+
+ // Filled in by the user. Translates Block and Value ID to PC.
+ GetPC func(ID, ID) int64
+}
+
+type BlockDebug struct {
+ // Whether the block had any changes to user variables at all.
+ relevant bool
+ // State at the end of the block if it's fully processed. Immutable once initialized.
+ endState []liveSlot
+}
+
+// A liveSlot is a slot that's live in loc at entry/exit of a block.
+type liveSlot struct {
+ // An inlined VarLoc, so it packs into 16 bytes instead of 20.
+ Registers RegisterSet
+ StackOffset
+
+ slot SlotID
+}
+
+func (loc liveSlot) absent() bool {
+ return loc.Registers == 0 && !loc.onStack()
+}
+
+// StackOffset encodes whether a value is on the stack and if so, where. It is
+// a 31-bit integer followed by a presence flag at the low-order bit.
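+// For example, a slot at frame offset 16 is encoded as 16<<1|1 = 33; the zero
+// value means the slot is not on the stack.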
+type StackOffset int32
+
+func (s StackOffset) onStack() bool {
+ return s != 0
+}
+
+func (s StackOffset) stackOffsetValue() int32 {
+ return int32(s) >> 1
+}
+
+// stateAtPC is the current state of all variables at some point.
+type stateAtPC struct {
+ // The location of each known slot, indexed by SlotID.
+ slots []VarLoc
+ // The slots present in each register, indexed by register number.
+ registers [][]SlotID
+}
+
+// reset fills state with the live variables from live.
+func (state *stateAtPC) reset(live []liveSlot) {
+ slots, registers := state.slots, state.registers
+ for i := range slots {
+ slots[i] = VarLoc{}
+ }
+ for i := range registers {
+ registers[i] = registers[i][:0]
+ }
+ for _, live := range live {
+ slots[live.slot] = VarLoc{live.Registers, live.StackOffset}
+ if live.Registers == 0 {
+ continue
+ }
+
+ mask := uint64(live.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ registers[reg] = append(registers[reg], live.slot)
+ }
+ }
+ state.slots, state.registers = slots, registers
+}
+
+func (s *debugState) LocString(loc VarLoc) string {
+ if loc.absent() {
+ return "<nil>"
+ }
+
+ var storage []string
+ if loc.onStack() {
+ storage = append(storage, "stack")
+ }
+
+ mask := uint64(loc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ storage = append(storage, s.registers[reg].String())
+ }
+ return strings.Join(storage, ",")
+}
+
+// A VarLoc describes the storage for part of a user variable.
+type VarLoc struct {
+ // The registers this variable is available in. There can be more than
+ // one in various situations, e.g. it's being moved between registers.
+ Registers RegisterSet
+
+ StackOffset
+}
+
+func (loc VarLoc) absent() bool {
+ return loc.Registers == 0 && !loc.onStack()
+}
+
+var BlockStart = &Value{
+ ID: -10000,
+ Op: OpInvalid,
+ Aux: "BlockStart",
+}
+
+var BlockEnd = &Value{
+ ID: -20000,
+ Op: OpInvalid,
+ Aux: "BlockEnd",
+}
+
+// RegisterSet is a bitmap of registers, indexed by Register.num.
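+// For example, a variable live in registers 0 and 3 has RegisterSet 0b1001.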
+type RegisterSet uint64
+
+// logf prints debug-specific logging to stdout (always stdout) if the current
+// function is tagged by GOSSAFUNC (for ssa output directed either to stdout or html).
+func (s *debugState) logf(msg string, args ...interface{}) {
+ if s.f.PrintOrHtmlSSA {
+ fmt.Printf(msg, args...)
+ }
+}
+
+type debugState struct {
+ // See FuncDebug.
+ slots []LocalSlot
+ vars []GCNode
+ varSlots [][]SlotID
+ lists [][]byte
+
+ // The user variable that each slot rolls up to, indexed by SlotID.
+ slotVars []VarID
+
+ f *Func
+ loggingEnabled bool
+ registers []Register
+ stackOffset func(LocalSlot) int32
+ ctxt *obj.Link
+
+ // The names (slots) associated with each value, indexed by Value ID.
+ valueNames [][]SlotID
+
+ // The current state of whatever analysis is running.
+ currentState stateAtPC
+ liveCount []int
+ changedVars *sparseSet
+
+ // The pending location list entry for each user variable, indexed by VarID.
+ pendingEntries []pendingEntry
+
+ varParts map[GCNode][]SlotID
+ blockDebug []BlockDebug
+ pendingSlotLocs []VarLoc
+ liveSlots []liveSlot
+ liveSlotSliceBegin int
+ partsByVarOffset sort.Interface
+}
+
+func (state *debugState) initializeCache(f *Func, numVars, numSlots int) {
+ // One blockDebug per block. Initialized in allocBlock.
+ if cap(state.blockDebug) < f.NumBlocks() {
+ state.blockDebug = make([]BlockDebug, f.NumBlocks())
+ } else {
+ // This local variable, and the ones like it below, enable compiler
+ // optimizations. Don't inline them.
+ b := state.blockDebug[:f.NumBlocks()]
+ for i := range b {
+ b[i] = BlockDebug{}
+ }
+ }
+
+ // A list of slots per Value. Reuse the previous child slices.
+ if cap(state.valueNames) < f.NumValues() {
+ old := state.valueNames
+ state.valueNames = make([][]SlotID, f.NumValues())
+ copy(state.valueNames, old)
+ }
+ vn := state.valueNames[:f.NumValues()]
+ for i := range vn {
+ vn[i] = vn[i][:0]
+ }
+
+ // Slot and register contents for currentState. Cleared by reset().
+ if cap(state.currentState.slots) < numSlots {
+ state.currentState.slots = make([]VarLoc, numSlots)
+ } else {
+ state.currentState.slots = state.currentState.slots[:numSlots]
+ }
+ if cap(state.currentState.registers) < len(state.registers) {
+ state.currentState.registers = make([][]SlotID, len(state.registers))
+ } else {
+ state.currentState.registers = state.currentState.registers[:len(state.registers)]
+ }
+
+ // Used many times by mergePredecessors.
+ if cap(state.liveCount) < numSlots {
+ state.liveCount = make([]int, numSlots)
+ } else {
+ state.liveCount = state.liveCount[:numSlots]
+ }
+
+ // A relatively small set, but written to many times by processValue to record which variables changed.
+ state.changedVars = newSparseSet(numVars)
+
+ // A pending entry per user variable, with space to track each of its pieces.
+ numPieces := 0
+ for i := range state.varSlots {
+ numPieces += len(state.varSlots[i])
+ }
+ if cap(state.pendingSlotLocs) < numPieces {
+ state.pendingSlotLocs = make([]VarLoc, numPieces)
+ } else {
+ psl := state.pendingSlotLocs[:numPieces]
+ for i := range psl {
+ psl[i] = VarLoc{}
+ }
+ }
+ if cap(state.pendingEntries) < numVars {
+ state.pendingEntries = make([]pendingEntry, numVars)
+ }
+ pe := state.pendingEntries[:numVars]
+ freePieceIdx := 0
+ for varID, slots := range state.varSlots {
+ pe[varID] = pendingEntry{
+ pieces: state.pendingSlotLocs[freePieceIdx : freePieceIdx+len(slots)],
+ }
+ freePieceIdx += len(slots)
+ }
+ state.pendingEntries = pe
+
+ if cap(state.lists) < numVars {
+ state.lists = make([][]byte, numVars)
+ } else {
+ state.lists = state.lists[:numVars]
+ for i := range state.lists {
+ state.lists[i] = nil
+ }
+ }
+
+ state.liveSlots = state.liveSlots[:0]
+ state.liveSlotSliceBegin = 0
+}
+
+func (state *debugState) allocBlock(b *Block) *BlockDebug {
+ return &state.blockDebug[b.ID]
+}
+
+func (state *debugState) appendLiveSlot(ls liveSlot) {
+ state.liveSlots = append(state.liveSlots, ls)
+}
+
+func (state *debugState) getLiveSlotSlice() []liveSlot {
+ s := state.liveSlots[state.liveSlotSliceBegin:]
+ state.liveSlotSliceBegin = len(state.liveSlots)
+ return s
+}
+
+func (s *debugState) blockEndStateString(b *BlockDebug) string {
+ endState := stateAtPC{slots: make([]VarLoc, len(s.slots)), registers: make([][]SlotID, len(s.registers))}
+ endState.reset(b.endState)
+ return s.stateString(endState)
+}
+
+func (s *debugState) stateString(state stateAtPC) string {
+ var strs []string
+ for slotID, loc := range state.slots {
+ if !loc.absent() {
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", s.slots[slotID], s.LocString(loc)))
+ }
+ }
+
+ strs = append(strs, "\n")
+ for reg, slots := range state.registers {
+ if len(slots) != 0 {
+ var slotStrs []string
+ for _, slot := range slots {
+ slotStrs = append(slotStrs, s.slots[slot].String())
+ }
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", &s.registers[reg], slotStrs))
+ }
+ }
+
+ if len(strs) == 1 {
+ return "(no vars)\n"
+ }
+ return strings.Join(strs, "")
+}
+
+// BuildFuncDebug returns debug information for f.
+// f must be fully processed, so that each Value is where it will be when
+// machine code is emitted.
+func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32) *FuncDebug {
+ if f.RegAlloc == nil {
+ f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f)
+ }
+ state := &f.Cache.debugState
+ state.loggingEnabled = loggingEnabled
+ state.f = f
+ state.registers = f.Config.registers
+ state.stackOffset = stackOffset
+ state.ctxt = ctxt
+
+ if state.loggingEnabled {
+ state.logf("Generating location lists for function %q\n", f.Name)
+ }
+
+ if state.varParts == nil {
+ state.varParts = make(map[GCNode][]SlotID)
+ } else {
+ for n := range state.varParts {
+ delete(state.varParts, n)
+ }
+ }
+
+ // Recompose any decomposed variables, and establish the canonical
+ // IDs for each var and slot by filling out state.vars and state.slots.
+
+ state.slots = state.slots[:0]
+ state.vars = state.vars[:0]
+ for i, slot := range f.Names {
+ state.slots = append(state.slots, slot)
+ if slot.N.IsSynthetic() {
+ continue
+ }
+
+ topSlot := &slot
+ for topSlot.SplitOf != nil {
+ topSlot = topSlot.SplitOf
+ }
+ if _, ok := state.varParts[topSlot.N]; !ok {
+ state.vars = append(state.vars, topSlot.N)
+ }
+ state.varParts[topSlot.N] = append(state.varParts[topSlot.N], SlotID(i))
+ }
+
+ // Recreate the LocalSlot for each stack-only variable.
+ // This would probably be better as an output from stackframe.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpVarDef || v.Op == OpVarKill {
+ n := v.Aux.(GCNode)
+ if n.IsSynthetic() {
+ continue
+ }
+
+ if _, ok := state.varParts[n]; !ok {
+ slot := LocalSlot{N: n, Type: v.Type, Off: 0}
+ state.slots = append(state.slots, slot)
+ state.varParts[n] = []SlotID{SlotID(len(state.slots) - 1)}
+ state.vars = append(state.vars, n)
+ }
+ }
+ }
+ }
+
+ // Fill in the var<->slot mappings.
+ if cap(state.varSlots) < len(state.vars) {
+ state.varSlots = make([][]SlotID, len(state.vars))
+ } else {
+ state.varSlots = state.varSlots[:len(state.vars)]
+ for i := range state.varSlots {
+ state.varSlots[i] = state.varSlots[i][:0]
+ }
+ }
+ if cap(state.slotVars) < len(state.slots) {
+ state.slotVars = make([]VarID, len(state.slots))
+ } else {
+ state.slotVars = state.slotVars[:len(state.slots)]
+ }
+
+ if state.partsByVarOffset == nil {
+ state.partsByVarOffset = &partsByVarOffset{}
+ }
+ for varID, n := range state.vars {
+ parts := state.varParts[n]
+ state.varSlots[varID] = parts
+ for _, slotID := range parts {
+ state.slotVars[slotID] = VarID(varID)
+ }
+ *state.partsByVarOffset.(*partsByVarOffset) = partsByVarOffset{parts, state.slots}
+ sort.Sort(state.partsByVarOffset)
+ }
+
+ state.initializeCache(f, len(state.varParts), len(state.slots))
+
+ for i, slot := range f.Names {
+ if slot.N.IsSynthetic() {
+ continue
+ }
+ for _, value := range f.NamedValues[slot] {
+ state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i))
+ }
+ }
+
+ blockLocs := state.liveness()
+ state.buildLocationLists(blockLocs)
+
+ return &FuncDebug{
+ Slots: state.slots,
+ VarSlots: state.varSlots,
+ Vars: state.vars,
+ LocationLists: state.lists,
+ }
+}
+
+// liveness walks the function in control flow order, calculating the start
+// and end state of each block.
+func (state *debugState) liveness() []*BlockDebug {
+ blockLocs := make([]*BlockDebug, state.f.NumBlocks())
+
+ // Reverse postorder: visit a block after as many as possible of its
+ // predecessors have been visited.
+ po := state.f.Postorder()
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+
+ // Build the starting state for the block from the final
+ // state of its predecessors.
+ startState, startValid := state.mergePredecessors(b, blockLocs, nil)
+ changed := false
+ if state.loggingEnabled {
+ state.logf("Processing %v, initial state:\n%v", b, state.stateString(state.currentState))
+ }
+
+ // Update locs/registers with the effects of each Value.
+ for _, v := range b.Values {
+ slots := state.valueNames[v.ID]
+
+ // Loads and stores inherit the names of their sources.
+ var source *Value
+ switch v.Op {
+ case OpStoreReg:
+ source = v.Args[0]
+ case OpLoadReg:
+ switch a := v.Args[0]; a.Op {
+ case OpArg, OpPhi:
+ source = a
+ case OpStoreReg:
+ source = a.Args[0]
+ default:
+ if state.loggingEnabled {
+ state.logf("at %v: load with unexpected source op: %v (%v)\n", v, a.Op, a)
+ }
+ }
+ }
+ // Update valueNames with the source so that later steps
+ // don't need special handling.
+ if source != nil {
+ slots = append(slots, state.valueNames[source.ID]...)
+ state.valueNames[v.ID] = slots
+ }
+
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ c := state.processValue(v, slots, reg)
+ changed = changed || c
+ }
+
+ if state.loggingEnabled {
+ state.f.Logf("Block %v done, locs:\n%v", b, state.stateString(state.currentState))
+ }
+
+ locs := state.allocBlock(b)
+ locs.relevant = changed
+ if !changed && startValid {
+ locs.endState = startState
+ } else {
+ for slotID, slotLoc := range state.currentState.slots {
+ if slotLoc.absent() {
+ continue
+ }
+ state.appendLiveSlot(liveSlot{slot: SlotID(slotID), Registers: slotLoc.Registers, StackOffset: slotLoc.StackOffset})
+ }
+ locs.endState = state.getLiveSlotSlice()
+ }
+ blockLocs[b.ID] = locs
+ }
+ return blockLocs
+}
+
+// mergePredecessors takes the end state of each of b's predecessors and
+// intersects them to form the starting state for b. It puts that state in
+// blockLocs, and fills state.currentState with it. If convenient, it returns
+// a reused []liveSlot (and true) representing the starting state.
+// If previousBlock is non-nil, it registers changes vs. that block's end
+// state in state.changedVars. Note that previousBlock will often not be a
+// predecessor.
+func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug, previousBlock *Block) ([]liveSlot, bool) {
+ // Filter out back branches.
+ var predsBuf [10]*Block
+ preds := predsBuf[:0]
+ for _, pred := range b.Preds {
+ if blockLocs[pred.b.ID] != nil {
+ preds = append(preds, pred.b)
+ }
+ }
+
+ if state.loggingEnabled {
+ // The logf below would cause preds to be heap-allocated if
+ // it were passed directly.
+ preds2 := make([]*Block, len(preds))
+ copy(preds2, preds)
+ state.logf("Merging %v into %v\n", preds2, b)
+ }
+
+ // TODO all the calls to this are overkill; only need to do this for slots that are not present in the merge.
+ markChangedVars := func(slots []liveSlot) {
+ for _, live := range slots {
+ state.changedVars.add(ID(state.slotVars[live.slot]))
+ }
+ }
+
+ if len(preds) == 0 {
+ if previousBlock != nil {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(nil)
+ return nil, true
+ }
+
+ p0 := blockLocs[preds[0].ID].endState
+ if len(preds) == 1 {
+ if previousBlock != nil && preds[0].ID != previousBlock.ID {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(p0)
+ return p0, true
+ }
+
+ baseID := preds[0].ID
+ baseState := p0
+
+ // If previous block is not a predecessor, its location information changes at boundary with this block.
+ previousBlockIsNotPredecessor := previousBlock != nil // If it's nil, no info to change.
+
+ if previousBlock != nil {
+ // Try to use previousBlock as the base state
+ // if possible.
+ for _, pred := range preds[1:] {
+ if pred.ID == previousBlock.ID {
+ baseID = pred.ID
+ baseState = blockLocs[pred.ID].endState
+ previousBlockIsNotPredecessor = false
+ break
+ }
+ }
+ }
+
+ if state.loggingEnabled {
+ state.logf("Starting %v with state from b%v:\n%v", b, baseID, state.blockEndStateString(blockLocs[baseID]))
+ }
+
+ slotLocs := state.currentState.slots
+ for _, predSlot := range baseState {
+ slotLocs[predSlot.slot] = VarLoc{predSlot.Registers, predSlot.StackOffset}
+ state.liveCount[predSlot.slot] = 1
+ }
+ for _, pred := range preds {
+ if pred.ID == baseID {
+ continue
+ }
+ if state.loggingEnabled {
+ state.logf("Merging in state from %v:\n%v", pred, state.blockEndStateString(blockLocs[pred.ID]))
+ }
+ for _, predSlot := range blockLocs[pred.ID].endState {
+ state.liveCount[predSlot.slot]++
+ liveLoc := slotLocs[predSlot.slot]
+ if !liveLoc.onStack() || !predSlot.onStack() || liveLoc.StackOffset != predSlot.StackOffset {
+ liveLoc.StackOffset = 0
+ }
+ liveLoc.Registers &= predSlot.Registers
+ slotLocs[predSlot.slot] = liveLoc
+ }
+ }
+
+ // Check if the merged state is the same as the base predecessor's
+ // final state, and reuse it if so. In principle it could match any
+ // predecessor, but it's probably not worth checking more than one.
+ unchanged := true
+ for _, predSlot := range baseState {
+ if state.liveCount[predSlot.slot] != len(preds) ||
+ slotLocs[predSlot.slot].Registers != predSlot.Registers ||
+ slotLocs[predSlot.slot].StackOffset != predSlot.StackOffset {
+ unchanged = false
+ break
+ }
+ }
+ if unchanged {
+ if state.loggingEnabled {
+ state.logf("After merge, %v matches b%v exactly.\n", b, baseID)
+ }
+ if previousBlockIsNotPredecessor {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(baseState)
+ return baseState, true
+ }
+
+ for reg := range state.currentState.registers {
+ state.currentState.registers[reg] = state.currentState.registers[reg][:0]
+ }
+
+ // A slot is live if it was seen in all predecessors, and they all had
+ // some storage in common.
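+ // For example, a slot held in AX and on the stack in one predecessor but
+ // only in AX in another merges to AX only.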
+ for _, predSlot := range baseState {
+ slotLoc := slotLocs[predSlot.slot]
+
+ if state.liveCount[predSlot.slot] != len(preds) {
+ // Seen in only some predecessors. Clear it out.
+ slotLocs[predSlot.slot] = VarLoc{}
+ continue
+ }
+
+ // Present in all predecessors.
+ mask := uint64(slotLoc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+ state.currentState.registers[reg] = append(state.currentState.registers[reg], predSlot.slot)
+ }
+ }
+
+ if previousBlockIsNotPredecessor {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+
+ }
+ return nil, false
+}
+
+// processValue updates state.currentState to reflect v, a value with
+// the names in vSlots and homed in vReg. "v" becomes visible after execution of
+// the instructions evaluating it. It reports whether any variable locations
+// changed; the affected VarIDs are recorded in state.changedVars.
+func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) bool {
+ locs := state.currentState
+ changed := false
+ setSlot := func(slot SlotID, loc VarLoc) {
+ changed = true
+ state.changedVars.add(ID(state.slotVars[slot]))
+ state.currentState.slots[slot] = loc
+ }
+
+ // Handle any register clobbering. Call operations, for example,
+ // clobber all registers even though they don't explicitly write to
+ // them.
+ clobbers := uint64(opcodeTable[v.Op].reg.clobbers)
+ for {
+ if clobbers == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(clobbers))
+ clobbers &^= 1 << reg
+
+ for _, slot := range locs.registers[reg] {
+ if state.loggingEnabled {
+ state.logf("at %v: %v clobbered out of %v\n", v, state.slots[slot], &state.registers[reg])
+ }
+
+ last := locs.slots[slot]
+ if last.absent() {
+ state.f.Fatalf("at %v: slot %v in register %v with no location entry", v, state.slots[slot], &state.registers[reg])
+ continue
+ }
+ regs := last.Registers &^ (1 << reg)
+ setSlot(slot, VarLoc{regs, last.StackOffset})
+ }
+
+ locs.registers[reg] = locs.registers[reg][:0]
+ }
+
+ switch {
+ case v.Op == OpVarDef, v.Op == OpVarKill:
+ n := v.Aux.(GCNode)
+ if n.IsSynthetic() {
+ break
+ }
+
+ slotID := state.varParts[n][0]
+ var stackOffset StackOffset
+ if v.Op == OpVarDef {
+ stackOffset = StackOffset(state.stackOffset(state.slots[slotID])<<1 | 1)
+ }
+ setSlot(slotID, VarLoc{0, stackOffset})
+ if state.loggingEnabled {
+ if v.Op == OpVarDef {
+ state.logf("at %v: stack-only var %v now live\n", v, state.slots[slotID])
+ } else {
+ state.logf("at %v: stack-only var %v now dead\n", v, state.slots[slotID])
+ }
+ }
+
+ case v.Op == OpArg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ if state.loggingEnabled {
+ state.logf("at %v: arg %v now on stack in location %v\n", v, state.slots[slot], home)
+ if last := locs.slots[slot]; !last.absent() {
+ state.logf("at %v: unexpected arg op on already-live slot %v\n", v, state.slots[slot])
+ }
+ }
+
+ setSlot(slot, VarLoc{0, StackOffset(stackOffset)})
+ }
+
+ case v.Op == OpStoreReg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ last := locs.slots[slot]
+ if last.absent() {
+ if state.loggingEnabled {
+ state.logf("at %v: unexpected spill of unnamed register %s\n", v, vReg)
+ }
+ break
+ }
+
+ setSlot(slot, VarLoc{last.Registers, StackOffset(stackOffset)})
+ if state.loggingEnabled {
+ state.logf("at %v: %v spilled to stack location %v\n", v, state.slots[slot], home)
+ }
+ }
+
+ case vReg != nil:
+ if state.loggingEnabled {
+ newSlots := make([]bool, len(state.slots))
+ for _, slot := range vSlots {
+ newSlots[slot] = true
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ if !newSlots[slot] {
+ state.logf("at %v: overwrote %v in register %v\n", v, state.slots[slot], vReg)
+ }
+ }
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{last.Registers &^ (1 << uint8(vReg.num)), last.StackOffset})
+ }
+ locs.registers[vReg.num] = locs.registers[vReg.num][:0]
+ locs.registers[vReg.num] = append(locs.registers[vReg.num], vSlots...)
+ for _, slot := range vSlots {
+ if state.loggingEnabled {
+ state.logf("at %v: %v now in %s\n", v, state.slots[slot], vReg)
+ }
+
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{1<<uint8(vReg.num) | last.Registers, last.StackOffset})
+ }
+ }
+ return changed
+}
+
+// varOffset returns the offset of slot within the user variable it was
+// decomposed from. This has nothing to do with its stack offset.
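+// For example, the imaginary float64 part of a decomposed complex128 variable
+// has varOffset 8.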
+func varOffset(slot LocalSlot) int64 {
+ offset := slot.Off
+ s := &slot
+ for ; s.SplitOf != nil; s = s.SplitOf {
+ offset += s.SplitOffset
+ }
+ return offset
+}
+
+type partsByVarOffset struct {
+ slotIDs []SlotID
+ slots []LocalSlot
+}
+
+func (a partsByVarOffset) Len() int { return len(a.slotIDs) }
+func (a partsByVarOffset) Less(i, j int) bool {
+ return varOffset(a.slots[a.slotIDs[i]]) < varOffset(a.slots[a.slotIDs[j]])
+}
+func (a partsByVarOffset) Swap(i, j int) { a.slotIDs[i], a.slotIDs[j] = a.slotIDs[j], a.slotIDs[i] }
+
+// A pendingEntry represents the beginning of a location list entry, missing
+// only its end coordinate.
+type pendingEntry struct {
+ present bool
+ startBlock, startValue ID
+ // The location of each piece of the variable, in the same order as the
+ // SlotIDs in varParts.
+ pieces []VarLoc
+}
+
+func (e *pendingEntry) clear() {
+ e.present = false
+ e.startBlock = 0
+ e.startValue = 0
+ for i := range e.pieces {
+ e.pieces[i] = VarLoc{}
+ }
+}
+
+// canMerge reports whether the location description for new is the same as
+// pending.
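+// Two descriptions match if both are absent, both are on the stack at the
+// same offset, or both include the same first register.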
+func canMerge(pending, new VarLoc) bool {
+ if pending.absent() && new.absent() {
+ return true
+ }
+ if pending.absent() || new.absent() {
+ return false
+ }
+ if pending.onStack() {
+ return pending.StackOffset == new.StackOffset
+ }
+ if pending.Registers != 0 && new.Registers != 0 {
+ return firstReg(pending.Registers) == firstReg(new.Registers)
+ }
+ return false
+}
+
+// firstReg returns the first register in set that is present.
+func firstReg(set RegisterSet) uint8 {
+ if set == 0 {
+ // This is wrong, but there seem to be some situations where we
+ // produce locations with no storage.
+ return 0
+ }
+ return uint8(bits.TrailingZeros64(uint64(set)))
+}
+
+// buildLocationLists builds location lists for all the user variables in
+// state.f, using the information about block state in blockLocs.
+// The returned location lists are not fully complete. They are in terms of
+// SSA values rather than PCs, and have no base address/end entries. They will
+// be finished by PutLocationList.
+func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
+ // Run through the function in program text order, building up location
+ // lists as we go. The heavy lifting has mostly already been done.
+
+ var prevBlock *Block
+ for _, b := range state.f.Blocks {
+ state.mergePredecessors(b, blockLocs, prevBlock)
+
+ if !blockLocs[b.ID].relevant {
+ // Handle any differences among predecessor blocks and previous block (perhaps not a predecessor)
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockStart)
+ }
+ continue
+ }
+
+ zeroWidthPending := false
+ apcChangedSize := 0 // size of changedVars for leading Args, Phi, ClosurePtr
+ // expect to see values in pattern (apc)* (zerowidth|real)*
+ for _, v := range b.Values {
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+
+ if opcodeTable[v.Op].zeroWidth {
+ if changed {
+ if v.Op == OpArg || v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() {
+ // These ranges begin at true beginning of block, not after first instruction
+ if zeroWidthPending {
+ b.Func.Fatalf("Unexpected op mixed with OpArg/OpPhi/OpLoweredGetClosurePtr at beginning of block %s in %s\n%s", b, b.Func.Name, b.Func)
+ }
+ apcChangedSize = len(state.changedVars.contents())
+ continue
+ }
+ // Other zero-width ops must wait on a "real" op.
+ zeroWidthPending = true
+ }
+ continue
+ }
+
+ if !changed && !zeroWidthPending {
+ continue
+ }
+ // Not zero-width; i.e., a "real" instruction.
+
+ zeroWidthPending = false
+ for i, varID := range state.changedVars.contents() {
+ if i < apcChangedSize { // buffered true start-of-block changes
+ state.updateVar(VarID(varID), v.Block, BlockStart)
+ } else {
+ state.updateVar(VarID(varID), v.Block, v)
+ }
+ }
+ state.changedVars.clear()
+ apcChangedSize = 0
+ }
+ for i, varID := range state.changedVars.contents() {
+ if i < apcChangedSize { // buffered true start-of-block changes
+ state.updateVar(VarID(varID), b, BlockStart)
+ } else {
+ state.updateVar(VarID(varID), b, BlockEnd)
+ }
+ }
+
+ prevBlock = b
+ }
+
+ if state.loggingEnabled {
+ state.logf("location lists:\n")
+ }
+
+ // Flush any leftover entries live at the end of the last block.
+ for varID := range state.lists {
+ state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, BlockEnd.ID)
+ list := state.lists[varID]
+ if state.loggingEnabled {
+ if len(list) == 0 {
+ state.logf("\t%v : empty list\n", state.vars[varID])
+ } else {
+ state.logf("\t%v : %q\n", state.vars[varID], hex.EncodeToString(state.lists[varID]))
+ }
+ }
+ }
+}
+
+// updateVar updates the pending location list entry for varID to
+// reflect the new locations in curLoc, beginning at v in block b.
+// v may be one of the special values indicating block start or end.
+func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
+ curLoc := state.currentState.slots
+ // Assemble the location list entry with whatever's live.
+ empty := true
+ for _, slotID := range state.varSlots[varID] {
+ if !curLoc[slotID].absent() {
+ empty = false
+ break
+ }
+ }
+ pending := &state.pendingEntries[varID]
+ if empty {
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.clear()
+ return
+ }
+
+ // Extend the previous entry if possible.
+ if pending.present {
+ merge := true
+ for i, slotID := range state.varSlots[varID] {
+ if !canMerge(pending.pieces[i], curLoc[slotID]) {
+ merge = false
+ break
+ }
+ }
+ if merge {
+ return
+ }
+ }
+
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.present = true
+ pending.startBlock = b.ID
+ pending.startValue = v.ID
+ for i, slot := range state.varSlots[varID] {
+ pending.pieces[i] = curLoc[slot]
+ }
+}
+
+// writePendingEntry writes out the pending entry for varID, if any,
+// terminated at endBlock/Value.
+func (state *debugState) writePendingEntry(varID VarID, endBlock, endValue ID) {
+ pending := state.pendingEntries[varID]
+ if !pending.present {
+ return
+ }
+
+ // Pack the start/end coordinates into the start/end addresses
+ // of the entry, for decoding by PutLocationList.
+ start, startOK := encodeValue(state.ctxt, pending.startBlock, pending.startValue)
+ end, endOK := encodeValue(state.ctxt, endBlock, endValue)
+ if !startOK || !endOK {
+ // If someone writes a function that uses >65K values,
+ // they get incomplete debug info on 32-bit platforms.
+ return
+ }
+ if start == end {
+ if state.loggingEnabled {
+ // Printf not logf so not gated by GOSSAFUNC; this should fire very rarely.
+ fmt.Printf("Skipping empty location list for %v in %s\n", state.vars[varID], state.f.Name)
+ }
+ return
+ }
+
+ list := state.lists[varID]
+ list = appendPtr(state.ctxt, list, start)
+ list = appendPtr(state.ctxt, list, end)
+ // Where to write the length of the location description once
+ // we know how big it is.
+ sizeIdx := len(list)
+ list = list[:len(list)+2]
+
+ if state.loggingEnabled {
+ var partStrs []string
+ for i, slot := range state.varSlots[varID] {
+ partStrs = append(partStrs, fmt.Sprintf("%v@%v", state.slots[slot], state.LocString(pending.pieces[i])))
+ }
+ state.logf("Add entry for %v: \tb%vv%v-b%vv%v = \t%v\n", state.vars[varID], pending.startBlock, pending.startValue, endBlock, endValue, strings.Join(partStrs, " "))
+ }
+
+ for i, slotID := range state.varSlots[varID] {
+ loc := pending.pieces[i]
+ slot := state.slots[slotID]
+
+ if !loc.absent() {
+ if loc.onStack() {
+ if loc.stackOffsetValue() == 0 {
+ list = append(list, dwarf.DW_OP_call_frame_cfa)
+ } else {
+ list = append(list, dwarf.DW_OP_fbreg)
+ list = dwarf.AppendSleb128(list, int64(loc.stackOffsetValue()))
+ }
+ } else {
+ regnum := state.ctxt.Arch.DWARFRegisters[state.registers[firstReg(loc.Registers)].ObjNum()]
+ if regnum < 32 {
+ list = append(list, dwarf.DW_OP_reg0+byte(regnum))
+ } else {
+ list = append(list, dwarf.DW_OP_regx)
+ list = dwarf.AppendUleb128(list, uint64(regnum))
+ }
+ }
+ }
+
+ if len(state.varSlots[varID]) > 1 {
+ list = append(list, dwarf.DW_OP_piece)
+ list = dwarf.AppendUleb128(list, uint64(slot.Type.Size()))
+ }
+ }
+ state.ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+ state.lists[varID] = list
+}
+
+// PutLocationList adds list (a location list in its intermediate representation) to listSym.
+func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
+ getPC := debugInfo.GetPC
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, ^0)
+ listSym.WriteAddr(ctxt, listSym.Size, ctxt.Arch.PtrSize, startPC, 0)
+ }
+
+ // Re-read list, translating its address from block/value ID to PC.
+ for i := 0; i < len(list); {
+ begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:])))
+ end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:])))
+
+ // Horrible hack. If a range contains only zero-width
+ // instructions, e.g. an Arg, and it's at the beginning of the
+ // function, this would be indistinguishable from an
+ // end entry. Fudge it.
+ if begin == 0 && end == 0 {
+ end = 1
+ }
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
+ } else {
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
+ }
+
+ i += 2 * ctxt.Arch.PtrSize
+ datalen := 2 + int(ctxt.Arch.ByteOrder.Uint16(list[i:]))
+ listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // copy datalen and location encoding
+ i += datalen
+ }
+
+ // Location list contents, now with real PCs.
+ // End entry.
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+}
+
+// Pack a value and block ID into an address-sized uint, reporting whether
+// they fit.
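+// For example, on a 64-bit target block 3 and value 5 pack to 3<<32|5; on a
+// 32-bit target both IDs must fit in 16 bits or the encoding fails.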
+func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) {
+ if ctxt.Arch.PtrSize == 8 {
+ result := uint64(b)<<32 | uint64(uint32(v))
+ //ctxt.Logf("b %#x (%d) v %#x (%d) -> %#x\n", b, b, v, v, result)
+ return result, true
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ if ID(int16(b)) != b || ID(int16(v)) != v {
+ return 0, false
+ }
+ return uint64(b)<<16 | uint64(uint16(v)), true
+}
+
+// Unpack a value and block ID encoded by encodeValue.
+func decodeValue(ctxt *obj.Link, word uint64) (ID, ID) {
+ if ctxt.Arch.PtrSize == 8 {
+ b, v := ID(word>>32), ID(word)
+ //ctxt.Logf("%#x -> b %#x (%d) v %#x (%d)\n", word, b, b, v, v)
+ return b, v
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ return ID(word >> 16), ID(int16(word))
+}
+
+// Append a pointer-sized uint to buf.
+func appendPtr(ctxt *obj.Link, buf []byte, word uint64) []byte {
+ if cap(buf) < len(buf)+20 {
+ b := make([]byte, len(buf), 20+cap(buf)*2)
+ copy(b, buf)
+ buf = b
+ }
+ writeAt := len(buf)
+ buf = buf[0 : len(buf)+ctxt.Arch.PtrSize]
+ writePtr(ctxt, buf[writeAt:], word)
+ return buf
+}
+
+// Write a pointer-sized uint to the beginning of buf.
+func writePtr(ctxt *obj.Link, buf []byte, word uint64) {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ ctxt.Arch.ByteOrder.PutUint32(buf, uint32(word))
+ case 8:
+ ctxt.Arch.ByteOrder.PutUint64(buf, word)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
+
+// Read a pointer-sized uint from the beginning of buf.
+func readPtr(ctxt *obj.Link, buf []byte) uint64 {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ return uint64(ctxt.Arch.ByteOrder.Uint32(buf))
+ case 8:
+ return ctxt.Arch.ByteOrder.Uint64(buf)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
diff --git a/src/cmd/compile/internal/ssa/debug_test.go b/src/cmd/compile/internal/ssa/debug_test.go
new file mode 100644
index 0000000..3346312
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_test.go
@@ -0,0 +1,1020 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ update = flag.Bool("u", false, "update test reference files")
+ verbose = flag.Bool("v", false, "print debugger interactions (very verbose)")
+ dryrun = flag.Bool("n", false, "just print the command line and first debugging bits")
+ useGdb = flag.Bool("g", false, "use Gdb instead of Delve (dlv), use gdb reference files")
+ force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir")
+ repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them")
+ inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)")
+)
+
+var (
+ hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+")
+ numRe = regexp.MustCompile("-?[0-9]+")
+ stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"")
+ leadingDollarNumberRe = regexp.MustCompile("^[$][0-9]+")
+ optOutGdbRe = regexp.MustCompile("[<]optimized out[>]")
+ numberColonRe = regexp.MustCompile("^ *[0-9]+:")
+)
+
+var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb is no longer part of Xcode
+var debugger = "dlv" // For naming files, etc.
+
+var gogcflags = os.Getenv("GO_GCFLAGS")
+
+// optimizedLibs usually means "not running in a noopt test builder".
+var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l"))
+
+// TestNexting go-builds a file, then uses a debugger (default delve, optionally gdb)
+// to next through the generated executable, recording each line landed at, and
+// then compares those lines with reference file(s).
+// Flag -u updates the reference file(s).
+// Flag -g changes the debugger to gdb (and uses gdb-specific reference files)
+// Flag -v is ever-so-slightly verbose.
+// Flag -n is for dry-run, and prints the shell and first debug commands.
+//
+// Because this test (combined with existing compiler deficiencies) is flaky,
+// for gdb-based testing by default inlining is disabled
+// (otherwise output depends on library internals)
+// and for both gdb and dlv by default repeated lines in the next stream are ignored
+// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv).
+//
+// Also by default, any source code outside of .../testdata/ is not mentioned
+// in the debugging histories. This deals both with inlined library code once
+// the compiler is generating clean inline records, and also deals with
+// runtime code between return from main and process exit. This is hidden
+// so that those files (in the runtime/library) can change without affecting
+// this test.
+//
+// These choices can be reversed with -i (inlining on) and -r (repeats detected) which
+// will also cause their own failures against the expected outputs. Note that if the compiler
+// and debugger were behaving properly, the inlined code and repeated lines would not appear,
+// so the expected output is closer to what we hope to see, though it also encodes all our
+// current bugs.
+//
+// The file being tested may contain comments of the form
+// //DBG-TAG=(v1,v2,v3)
+// where DBG = {gdb,dlv} and TAG={dbg,opt}
+// each variable may optionally be followed by a / and one or more of S,A,N,O
+// to indicate normalization of Strings, (hex) addresses, and numbers.
+// "O" is an explicit indication that we expect it to be optimized out.
+// For example:
+//
+// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
+//
+// TODO: not implemented for Delve yet, but this is the plan
+//
+// After a compiler change that causes a difference in the debug behavior, check
+// to see if it is sensible or not, and if it is, update the reference files with
+// go test debug_test.go -args -u
+// (for Delve, the default), or
+// go test debug_test.go -args -u -g
+// (for gdb).
+//
+func TestNexting(t *testing.T) {
+ testenv.SkipFlaky(t, 37404)
+
+ skipReasons := "" // Many possible skip reasons, list all that apply
+ if testing.Short() {
+ skipReasons = "not run in short mode; "
+ }
+ testenv.MustHaveGoBuild(t)
+
+ if *useGdb && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") {
+ // Running gdb on OSX/darwin is very flaky.
+ // Sometimes it is called ggdb, depending on how it is installed.
+ // It also sometimes requires an admin password typed into a dialog box.
+ // Various architectures tend to differ slightly sometimes, and keeping them
+ // all in sync is a pain for people who don't have them all at hand,
+ // so limit testing to amd64 (for now)
+ skipReasons += "not run when testing gdb (-g) unless forced (-f) or linux-amd64; "
+ }
+
+ if !*useGdb && !*force && testenv.Builder() == "linux-386-longtest" {
+ // The latest version of Delve does support linux/386. However, the version currently
+ // installed in the linux-386-longtest builder does not. See golang.org/issue/39309.
+ skipReasons += "not run when testing delve on linux-386-longtest builder unless forced (-f); "
+ }
+
+ if *useGdb {
+ debugger = "gdb"
+ _, err := exec.LookPath(gdb)
+ if err != nil {
+ if runtime.GOOS != "darwin" {
+ skipReasons += "not run because gdb not on path; "
+ } else {
+ // On Darwin, MacPorts installs gdb as "ggdb".
+ _, err = exec.LookPath("ggdb")
+ if err != nil {
+ skipReasons += "not run because gdb (and also ggdb) requested by -g option not on path; "
+ } else {
+ gdb = "ggdb"
+ }
+ }
+ }
+ } else { // Delve
+ debugger = "dlv"
+ _, err := exec.LookPath("dlv")
+ if err != nil {
+ skipReasons += "not run because dlv not on path; "
+ }
+ }
+
+ if skipReasons != "" {
+ t.Skip(skipReasons[:len(skipReasons)-2])
+ }
+
+ optFlags := "" // Whatever flags are needed to test debugging of optimized code.
+ dbgFlags := "-N -l"
+ if *useGdb && !*inlines {
+ // For gdb (default), disable inlining so that a compiler test does not depend on library code.
+ // TODO: Technically not necessary in 1.10 and later, but it causes a largish regression that needs investigation.
+ optFlags += " -l"
+ }
+
+ moreargs := []string{}
+ if *useGdb && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
+ // gdb and lldb on Darwin do not deal with compressed dwarf.
+ // also, Windows.
+ moreargs = append(moreargs, "-ldflags=-compressdwarf=false")
+ }
+
+ subTest(t, debugger+"-dbg", "hist", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "scopes", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "i22558", dbgFlags, moreargs...)
+
+ subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, append(moreargs, "-race")...)
+
+ optSubTest(t, debugger+"-opt", "hist", optFlags, 1000, moreargs...)
+ optSubTest(t, debugger+"-opt", "scopes", optFlags, 1000, moreargs...)
+
+ // Was optSubtest, this test is observed flaky on Linux in Docker on (busy) macOS, probably because of timing
+ // glitches in this harness.
+ // TODO get rid of timing glitches in this harness.
+ skipSubTest(t, debugger+"-opt", "infloop", optFlags, 10, moreargs...)
+
+}
+
+// subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments,
+// then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered.
+func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if t.Name() == "TestNexting/gdb-dbg-i22558" {
+ testenv.SkipFlaky(t, 31263)
+ }
+ testNexting(t, basename, tag, gcflags, 1000, moreargs...)
+ })
+}
+
+// skipSubTest is the same as subTest except that it skips the test if execution is not forced (-f).
+func skipSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+			t.Skip("skipping flaky test because not forced (-f)")
+ }
+ })
+}
+
+// optSubTest is the same as subTest except that it skips the test if the runtime and libraries
+// were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later)
+func optSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ // If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail.
+ // This occurs in the noopt builders (for example).
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force || optimizedLibs {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+ t.Skip("skipping for unoptimized stdlib/runtime")
+ }
+ })
+}
+
+func testNexting(t *testing.T, base, tag, gcflags string, count int, moreArgs ...string) {
+	// (1) In testdata, build sample.go into test-sample.<tag>
+	// (2) Run the debugger, gathering a history of stops
+	// (3) Read the expected history from testdata/sample.<tag>.nexts,
+	//     or, with -u, write out testdata/sample.<tag>.nexts instead
+
+ testbase := filepath.Join("testdata", base) + "." + tag
+ tmpbase := filepath.Join("testdata", "test-"+base+"."+tag)
+
+ // Use a temporary directory unless -f is specified
+ if !*force {
+ tmpdir, err := ioutil.TempDir("", "debug_test")
+ if err != nil {
+ panic(fmt.Sprintf("Problem creating TempDir, error %v\n", err))
+ }
+ tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag)
+ if *verbose {
+ fmt.Printf("Tempdir is %s\n", tmpdir)
+ }
+ defer os.RemoveAll(tmpdir)
+ }
+ exe := tmpbase
+
+ runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags}
+ runGoArgs = append(runGoArgs, moreArgs...)
+ runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go"))
+
+ runGo(t, "", runGoArgs...)
+
+ nextlog := testbase + ".nexts"
+ tmplog := tmpbase + ".nexts"
+ var dbg dbgr
+ if *useGdb {
+ dbg = newGdb(tag, exe)
+ } else {
+ dbg = newDelve(tag, exe)
+ }
+ h1 := runDbgr(dbg, count)
+ if *dryrun {
+ fmt.Printf("# Tag for above is %s\n", dbg.tag())
+ return
+ }
+ if *update {
+ h1.write(nextlog)
+ } else {
+ h0 := &nextHist{}
+ h0.read(nextlog)
+ if !h0.equals(h1) {
+ // Be very noisy about exactly what's wrong to simplify debugging.
+ h1.write(tmplog)
+ cmd := exec.Command("diff", "-u", nextlog, tmplog)
+ line := asCommandLine("", cmd)
+ bytes, err := cmd.CombinedOutput()
+ if err != nil && len(bytes) == 0 {
+ t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err)
+ }
+ t.Fatalf("step/next histories differ, diff=\n%s", string(bytes))
+ }
+ }
+}
+
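+// dbgr abstracts the debugger under test (gdb or dlv); it is driven by
+// runDbgr and testNexting.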
+type dbgr interface {
+ start()
+	stepnext(s string) bool // step or next, possibly with a parameter; gets the line etc. Returns true for success, false for an unsure response
+ quit()
+ hist() *nextHist
+ tag() string
+}
+
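+// runDbgr starts dbg, issues at most maxNext step/next commands, quits the
+// debugger, and returns the recorded history (nil in dryrun mode).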
+func runDbgr(dbg dbgr, maxNext int) *nextHist {
+ dbg.start()
+ if *dryrun {
+ return nil
+ }
+ for i := 0; i < maxNext; i++ {
+ if !dbg.stepnext("n") {
+ break
+ }
+ }
+ dbg.quit()
+ h := dbg.hist()
+ return h
+}
+
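+// runGo runs the go tool in dir with the given arguments and returns its
+// stdout, failing the test on any error or stderr output. In dryrun mode it
+// only prints the command line.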
+func runGo(t *testing.T, dir string, args ...string) string {
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Command(testenv.GoToolPath(t), args...)
+ cmd.Dir = dir
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", cmd))
+ return ""
+ }
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Stderr = %s\nWant empty", s)
+ }
+
+ return stdout.String()
+}
+
+// tstring provides two strings, o (stdout) and e (stderr)
+type tstring struct {
+ o string
+ e string
+}
+
+func (t tstring) String() string {
+ return t.o + t.e
+}
+
+type pos struct {
+ line uint32
+ file uint8 // Artifact of plans to implement differencing instead of calling out to diff.
+}
+
+type nextHist struct {
+ f2i map[string]uint8
+ fs []string
+ ps []pos
+ texts []string
+ vars [][]string
+}
+
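+// write saves the history to filename in the format read back by read:
+// a line starting with a space names the current source file, a line of the
+// form <number>:<text> records a stop at that source line, and any other
+// line records a printed variable value.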
+func (h *nextHist) write(filename string) {
+ file, err := os.Create(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err))
+ }
+ defer file.Close()
+ var lastfile uint8
+ for i, x := range h.texts {
+ p := h.ps[i]
+ if lastfile != p.file {
+ fmt.Fprintf(file, " %s\n", h.fs[p.file-1])
+ lastfile = p.file
+ }
+ fmt.Fprintf(file, "%d:%s\n", p.line, x)
+ // TODO, normalize between gdb and dlv into a common, comparable format.
+ for _, y := range h.vars[i] {
+ y = strings.TrimSpace(y)
+ fmt.Fprintf(file, "%s\n", y)
+ }
+ }
+ file.Close()
+}
+
+func (h *nextHist) read(filename string) {
+ h.f2i = make(map[string]uint8)
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err))
+ }
+ var lastfile string
+ lines := strings.Split(string(bytes), "\n")
+ for i, l := range lines {
+ if len(l) > 0 && l[0] != '#' {
+ if l[0] == ' ' {
+				// file -- a line starting with a space names the current source file
+ lastfile = strings.TrimSpace(l)
+ } else if numberColonRe.MatchString(l) {
+ // line number -- <number>:<line>
+ colonPos := strings.Index(l, ":")
+ if colonPos == -1 {
+ panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain '<number>:' but does not.\n", i+1, l, filename))
+ }
+ h.add(lastfile, l[0:colonPos], l[colonPos+1:])
+ } else {
+ h.addVar(l)
+ }
+ }
+ }
+}
+
+// add appends file (name), line (number) and text (string) to the history,
+// provided that the file+line combo does not repeat the previous position,
+// and provided that the file is within the testdata directory (unless the inlines flag is set). The return
+// value indicates whether the append occurred.
+func (h *nextHist) add(file, line, text string) bool {
+ // Only record source code in testdata unless the inlines flag is set
+ if !*inlines && !strings.Contains(file, "/testdata/") {
+ return false
+ }
+ fi := h.f2i[file]
+ if fi == 0 {
+ h.fs = append(h.fs, file)
+ fi = uint8(len(h.fs))
+ h.f2i[file] = fi
+ }
+
+ line = strings.TrimSpace(line)
+ var li int
+ var err error
+ if line != "" {
+ li, err = strconv.Atoi(line)
+ if err != nil {
+ panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err))
+ }
+ }
+ l := len(h.ps)
+ p := pos{line: uint32(li), file: fi}
+
+ if l == 0 || *repeats || h.ps[l-1] != p {
+ h.ps = append(h.ps, p)
+ h.texts = append(h.texts, text)
+ h.vars = append(h.vars, []string{})
+ return true
+ }
+ return false
+}
+
+func (h *nextHist) addVar(text string) {
+ l := len(h.texts)
+ h.vars[l-1] = append(h.vars[l-1], text)
+}
+
+func invertMapSU8(hf2i map[string]uint8) map[uint8]string {
+ hi2f := make(map[uint8]string)
+ for hs, i := range hf2i {
+ hi2f[i] = hs
+ }
+ return hi2f
+}
+
+func (h *nextHist) equals(k *nextHist) bool {
+ if len(h.f2i) != len(k.f2i) {
+ return false
+ }
+ if len(h.ps) != len(k.ps) {
+ return false
+ }
+ hi2f := invertMapSU8(h.f2i)
+ ki2f := invertMapSU8(k.f2i)
+
+ for i, hs := range hi2f {
+ if hs != ki2f[i] {
+ return false
+ }
+ }
+
+ for i, x := range h.ps {
+ if k.ps[i] != x {
+ return false
+ }
+ }
+
+ for i, hv := range h.vars {
+ kv := k.vars[i]
+ if len(hv) != len(kv) {
+ return false
+ }
+ for j, hvt := range hv {
+ if hvt != kv[j] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// canonFileName strips everything before "/src/" from a filename.
+// This makes file names portable across different machines,
+// home directories, and temporary directories.
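+// For example, "/home/gopher/go/src/runtime/proc.go" becomes "src/runtime/proc.go".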
+func canonFileName(f string) string {
+ i := strings.Index(f, "/src/")
+ if i != -1 {
+ f = f[i+1:]
+ }
+ return f
+}
+
+/* Delve */
+
+type delveState struct {
+ cmd *exec.Cmd
+ tagg string
+ *ioState
+ atLineRe *regexp.Regexp // "\n =>"
+ funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)"
+ line string
+ file string
+ function string
+}
+
+func newDelve(tag, executable string, args ...string) dbgr {
+ cmd := exec.Command("dlv", "exec", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ if len(args) > 0 {
+ cmd.Args = append(cmd.Args, "--")
+ cmd.Args = append(cmd.Args, args...)
+ }
+ s := &delveState{tagg: tag, cmd: cmd}
+	// Delve embeds control characters to change the color of the "=>" and the line number;
+	// matching those would require '(\\x1b\\[[0-9;]+m)?' in the regexp, so instead TERM=dumb is set above.
+ s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)")
+ s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n")
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *delveState) tag() string {
+ return s.tagg
+}
+
+func (s *delveState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ if len(excerpts) > 1 {
+ excerpt = excerpts[1]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+				fmt.Printf("%s\n", locations[2]) // don't canonicalize verbose logging
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ s.ioState.history.add(s.file, s.line, excerpt)
+ // TODO: here is where variable processing will be added. See gdbState.stepnext as a guide.
+ // Adding this may require some amount of normalization so that logs are comparable.
+ return true
+ }
+ if *verbose {
+ fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e)
+ }
+ return false
+}
+
+func (s *delveState) start() {
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("b main.test\n")
+ fmt.Printf("c\n")
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.")
+ s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ")
+ s.stepnext("c")
+}
+
+func (s *delveState) quit() {
+ expect("", s.ioState.writeRead("q\n"))
+}
+
+/* Gdb */
+
+type gdbState struct {
+ cmd *exec.Cmd
+ tagg string
+ args []string
+ *ioState
+ atLineRe *regexp.Regexp
+ funcFileLinePCre *regexp.Regexp
+ line string
+ file string
+ function string
+}
+
+func newGdb(tag, executable string, args ...string) dbgr {
+ // Turn off shell, necessary for Darwin apparently
+ cmd := exec.Command(gdb, "-nx",
+ "-iex", fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
+ "-ex", "set startup-with-shell off", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ s := &gdbState{tagg: tag, cmd: cmd, args: args}
+ s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)")
+ s.funcFileLinePCre = regexp.MustCompile(
+ "([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)")
+ // runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201
+ // function file line
+ // Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *gdbState) tag() string {
+ return s.tagg
+}
+
+func (s *gdbState) start() {
+ run := "run"
+ for _, a := range s.args {
+ run += " " + a // Can't quote args for gdb, it will pass them through including the quotes
+ }
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("tbreak main.test\n")
+ fmt.Printf("%s\n", run)
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readSimpleExpecting("[(]gdb[)] ")
+ x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ")
+ expect("Breakpoint [0-9]+ at", x)
+ s.stepnext(run)
+}
+
+func (s *gdbState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ addedLine := false
+ if len(excerpts) == 0 && len(locations) == 0 {
+ if *verbose {
+ fmt.Printf("DID NOT MATCH %s", x.o)
+ }
+ return false
+ }
+ if len(excerpts) > 0 {
+ excerpt = excerpts[3]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+ fmt.Printf("%s\n", locations[2])
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+ if len(excerpts) > 0 {
+ if *verbose {
+ fmt.Printf(" %s\n", excerpts[2])
+ }
+ s.line = excerpts[2]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+
+ if !addedLine {
+		// Nothing was added: this was a repeat line, but the step itself succeeded.
+ return true
+ }
+ // Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3
+ vars := varsToPrint(excerpt, "//"+s.tag()+"=(")
+ for _, v := range vars {
+ response := printVariableAndNormalize(v, func(v string) string {
+ return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String()
+ })
+ s.ioState.history.addVar(response)
+ }
+ return true
+}
+
+// printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable
+// name, then uses printer to get the value of the variable from the debugger, and then
+// normalizes and returns the response.
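+// For example, given v="hist/A" and a debugger response "$1 = (*int) 0xc0000b2010",
+// the result would be "hist = (*int) <A>" (assuming, as their names suggest, that
+// leadingDollarNumberRe matches the leading "$1" and hexRe matches hex addresses).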
+func printVariableAndNormalize(v string, printer func(v string) string) string {
+ slashIndex := strings.Index(v, "/")
+ substitutions := ""
+ if slashIndex != -1 {
+ substitutions = v[slashIndex:]
+ v = v[:slashIndex]
+ }
+ response := printer(v)
+ // expect something like "$1 = ..."
+ dollar := strings.Index(response, "$")
+ cr := strings.Index(response, "\n")
+
+ if dollar == -1 { // some not entirely expected response, whine and carry on.
+ if cr == -1 {
+ response = strings.TrimSpace(response) // discards trailing newline
+ response = strings.Replace(response, "\n", "<BR>", -1)
+ return "$ Malformed response " + response
+ }
+ response = strings.TrimSpace(response[:cr])
+ return "$ " + response
+ }
+ if cr == -1 {
+ cr = len(response)
+ }
+ // Convert the leading $<number> into the variable name to enhance readability
+ // and reduce scope of diffs if an earlier print-variable is added.
+ response = strings.TrimSpace(response[dollar:cr])
+ response = leadingDollarNumberRe.ReplaceAllString(response, v)
+
+ // Normalize value as requested.
+ if strings.Contains(substitutions, "A") {
+ response = hexRe.ReplaceAllString(response, "<A>")
+ }
+ if strings.Contains(substitutions, "N") {
+ response = numRe.ReplaceAllString(response, "<N>")
+ }
+ if strings.Contains(substitutions, "S") {
+ response = stringRe.ReplaceAllString(response, "<S>")
+ }
+ if strings.Contains(substitutions, "O") {
+ response = optOutGdbRe.ReplaceAllString(response, "<Optimized out, as expected>")
+ }
+ return response
+}
+
+// varsToPrint takes a source code line, and extracts the comma-separated variable names
+// found between lookfor and the next ")".
+// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and
+// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"]
+func varsToPrint(line, lookfor string) []string {
+ var vars []string
+ if strings.Contains(line, lookfor) {
+ x := line[strings.Index(line, lookfor)+len(lookfor):]
+ end := strings.Index(x, ")")
+ if end == -1 {
+ panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line))
+ }
+ vars = strings.Split(x[:end], ",")
+ for i, y := range vars {
+ vars[i] = strings.TrimSpace(y)
+ }
+ }
+ return vars
+}
+
+func (s *gdbState) quit() {
+ response := s.ioState.writeRead("q\n")
+ if strings.Contains(response.o, "Quit anyway? (y or n)") {
+ defer func() {
+ if r := recover(); r != nil {
+ if s, ok := r.(string); !(ok && strings.Contains(s, "'Y\n'")) {
+ // Not the panic that was expected.
+ fmt.Printf("Expected a broken pipe panic, but saw the following panic instead")
+ panic(r)
+ }
+ }
+ }()
+ s.ioState.writeRead("Y\n")
+ }
+}
+
+type ioState struct {
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ stdin io.WriteCloser
+ outChan chan string
+ errChan chan string
+ last tstring // Output of previous step
+ history *nextHist
+}
+
+func newIoState(cmd *exec.Cmd) *ioState {
+ var err error
+ s := &ioState{}
+ s.history = &nextHist{}
+ s.history.f2i = make(map[string]uint8)
+ s.stdout, err = cmd.StdoutPipe()
+ line := asCommandLine("", cmd)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err))
+ }
+ s.stderr, err = cmd.StderrPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err))
+ }
+ s.stdin, err = cmd.StdinPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err))
+ }
+
+ s.outChan = make(chan string, 1)
+ s.errChan = make(chan string, 1)
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stdout.Read(buffer)
+ if n > 0 {
+ s.outChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stdout")
+ break
+ }
+ }
+ close(s.outChan)
+ s.stdout.Close()
+ }()
+
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stderr.Read(buffer)
+ if n > 0 {
+ s.errChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stderr")
+ break
+ }
+ }
+ close(s.errChan)
+ s.stderr.Close()
+ }()
+ return s
+}
+
+func (s *ioState) hist() *nextHist {
+ return s.history
+}
+
+// writeRead writes ss, then reads stdout and stderr, waiting 500ms to
+// be sure all the output has appeared.
+func (s *ioState) writeRead(ss string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readExpecting(-1, 500, "")
+}
+
+// writeReadExpect writes ss, then reads stdout and stderr until something
+// that matches expectRE appears. expectRE should not be ""
+func (s *ioState) writeReadExpect(ss, expectRE string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ if expectRE == "" {
+ panic("expectRE should not be empty; use .* instead")
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readSimpleExpecting(expectRE)
+}
+
+func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring {
+ timeout := time.Millisecond * time.Duration(millis)
+ interline := time.Millisecond * time.Duration(interlineTimeout)
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+loop:
+ for {
+ var timer <-chan time.Time
+ if timeout > 0 {
+ timer = time.After(timeout)
+ }
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ case <-timer:
+ break loop
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ timeout = interline
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+func (s *ioState) readSimpleExpecting(expectedRE string) tstring {
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+ for {
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+// replaceEnv returns a new environment derived from env
+// by removing any existing definition of ev and adding ev=evv.
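+// For example, replaceEnv([]string{"TERM=xterm"}, "TERM", "dumb") returns
+// []string{"TERM=dumb"}.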
+func replaceEnv(env []string, ev string, evv string) []string {
+ evplus := ev + "="
+ var found bool
+ for i, v := range env {
+ if strings.HasPrefix(v, evplus) {
+ found = true
+ env[i] = evplus + evv
+ }
+ }
+ if !found {
+ env = append(env, evplus+evv)
+ }
+ return env
+}
+
+// asCommandLine renders cmd as something that could be copy-and-pasted into a command line.
+// If cwd is not empty and different from the command's directory, an appropriate "cd" is prepended.
+func asCommandLine(cwd string, cmd *exec.Cmd) string {
+ s := "("
+ if cmd.Dir != "" && cmd.Dir != cwd {
+ s += "cd" + escape(cmd.Dir) + ";"
+ }
+ for _, e := range cmd.Env {
+ if !strings.HasPrefix(e, "PATH=") &&
+ !strings.HasPrefix(e, "HOME=") &&
+ !strings.HasPrefix(e, "USER=") &&
+ !strings.HasPrefix(e, "SHELL=") {
+ s += escape(e)
+ }
+ }
+ for _, a := range cmd.Args {
+ s += escape(a)
+ }
+ s += " )"
+ return s
+}
+
+// escape inserts escapes appropriate for use in a shell command line
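+// The result always starts with a space, so escaped arguments concatenate
+// cleanly; e.g., escape("a b") returns " 'a b'" and escape("ab") returns " ab".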
+func escape(s string) string {
+ s = strings.Replace(s, "\\", "\\\\", -1)
+ s = strings.Replace(s, "'", "\\'", -1)
+ // Conservative guess at characters that will force quoting
+ if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") {
+ s = " '" + s + "'"
+ } else {
+ s = " " + s
+ }
+ return s
+}
+
+func expect(want string, got tstring) {
+ if want != "" {
+ match, err := regexp.MatchString(want, got.o)
+ if err != nil {
+ panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err))
+ }
+ if match {
+ return
+ }
+ // Ignore error as we have already checked for it before
+ match, _ = regexp.MatchString(want, got.e)
+ if match {
+ return
+ }
+ fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
new file mode 100644
index 0000000..bf7f1e8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -0,0 +1,449 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "sort"
+)
+
+// decomposeBuiltIn converts phi ops on compound builtin types into phi
+// ops on simple types, then invokes rewrite rules to decompose
+// other ops on those types.
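+// For example, a phi of string type is rewritten into a StringMake of a phi
+// of its pointer component and a phi of its length component.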
+func decomposeBuiltIn(f *Func) {
+ // Decompose phis
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeBuiltInPhi(v)
+ }
+ }
+
+ // Decompose other values
+ // Note: deadcode is false because we need to keep the original
+ // values around so the name component resolution below can still work.
+ applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues)
+ if f.Config.RegSize == 4 {
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues)
+ }
+
+ // Split up named values into their components.
+	// Accumulate old names of decomposed aggregates in toDelete for efficient bulk deletion,
+	// and accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
+	// builtin types with leaf components, so there is no need to reprocess the newly created LocalSlots.
+ var toDelete []namedVal
+ var newNames []LocalSlot
+ for i, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsInteger() && t.Size() > f.Config.RegSize:
+ hiName, loName := f.fe.SplitInt64(name)
+ newNames = append(newNames, hiName, loName)
+ for j, v := range f.NamedValues[name] {
+ if v.Op != OpInt64Make {
+ continue
+ }
+ f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0])
+ f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsComplex():
+ rName, iName := f.fe.SplitComplex(name)
+ newNames = append(newNames, rName, iName)
+ for j, v := range f.NamedValues[name] {
+ if v.Op != OpComplexMake {
+ continue
+ }
+ f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0])
+ f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsString():
+ ptrName, lenName := f.fe.SplitString(name)
+ newNames = append(newNames, ptrName, lenName)
+ for j, v := range f.NamedValues[name] {
+ if v.Op != OpStringMake {
+ continue
+ }
+ f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
+ f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsSlice():
+ ptrName, lenName, capName := f.fe.SplitSlice(name)
+ newNames = append(newNames, ptrName, lenName, capName)
+ for j, v := range f.NamedValues[name] {
+ if v.Op != OpSliceMake {
+ continue
+ }
+ f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0])
+ f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1])
+ f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsInterface():
+ typeName, dataName := f.fe.SplitInterface(name)
+ newNames = append(newNames, typeName, dataName)
+ for j, v := range f.NamedValues[name] {
+ if v.Op != OpIMake {
+ continue
+ }
+ f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0])
+ f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case t.Size() > f.Config.RegSize:
+ f.Fatalf("undecomposed named type %s %v", name, t)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+ f.Names = append(f.Names, newNames...)
+}
+
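+// decomposeBuiltInPhi rewrites a single phi of a compound builtin type
+// (an integer wider than a register, complex, string, slice, or interface)
+// into phis of its components.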
+func decomposeBuiltInPhi(v *Value) {
+ switch {
+ case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize:
+ decomposeInt64Phi(v)
+ case v.Type.IsComplex():
+ decomposeComplexPhi(v)
+ case v.Type.IsString():
+ decomposeStringPhi(v)
+ case v.Type.IsSlice():
+ decomposeSlicePhi(v)
+ case v.Type.IsInterface():
+ decomposeInterfacePhi(v)
+ case v.Type.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ v.Fatalf("undecomposed type %s", v.Type)
+ }
+}
+
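+// decomposeStringPhi replaces a phi of string type with a StringMake of
+// phis of its pointer and length components.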
+func decomposeStringPhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := types.BytePtr
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpStringPtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpStringLen, lenType, a))
+ }
+ v.reset(OpStringMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+}
+
+func decomposeSlicePhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := v.Type.Elem().PtrTo()
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ cap := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpSlicePtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpSliceLen, lenType, a))
+ cap.AddArg(a.Block.NewValue1(v.Pos, OpSliceCap, lenType, a))
+ }
+ v.reset(OpSliceMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+ v.AddArg(cap)
+}
+
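+// decomposeInt64Phi replaces a phi of a 64-bit integer type (on a 32-bit
+// target) with an Int64Make of phis of its high and low 32-bit halves.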
+func decomposeInt64Phi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ if v.Type.IsSigned() {
+ partType = cfgtypes.Int32
+ } else {
+ partType = cfgtypes.UInt32
+ }
+
+ hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ lo := v.Block.NewValue0(v.Pos, OpPhi, cfgtypes.UInt32)
+ for _, a := range v.Args {
+ hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
+ lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, cfgtypes.UInt32, a))
+ }
+ v.reset(OpInt64Make)
+ v.AddArg(hi)
+ v.AddArg(lo)
+}
+
+func decomposeComplexPhi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ switch z := v.Type.Size(); z {
+ case 8:
+ partType = cfgtypes.Float32
+ case 16:
+ partType = cfgtypes.Float64
+ default:
+ v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
+ }
+
+ real := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ imag := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ for _, a := range v.Args {
+ real.AddArg(a.Block.NewValue1(v.Pos, OpComplexReal, partType, a))
+ imag.AddArg(a.Block.NewValue1(v.Pos, OpComplexImag, partType, a))
+ }
+ v.reset(OpComplexMake)
+ v.AddArg(real)
+ v.AddArg(imag)
+}
+
+func decomposeInterfacePhi(v *Value) {
+ uintptrType := v.Block.Func.Config.Types.Uintptr
+ ptrType := v.Block.Func.Config.Types.BytePtr
+
+ itab := v.Block.NewValue0(v.Pos, OpPhi, uintptrType)
+ data := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ for _, a := range v.Args {
+ itab.AddArg(a.Block.NewValue1(v.Pos, OpITab, uintptrType, a))
+ data.AddArg(a.Block.NewValue1(v.Pos, OpIData, ptrType, a))
+ }
+ v.reset(OpIMake)
+ v.AddArg(itab)
+ v.AddArg(data)
+}
+
+func decomposeArgs(f *Func) {
+ applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues)
+}
+
+func decomposeUser(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeUserPhi(v)
+ }
+ }
+ // Split up named values into their components.
+ i := 0
+ var newNames []LocalSlot
+ for _, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsStruct():
+ newNames = decomposeUserStructInto(f, name, newNames)
+ case t.IsArray():
+ newNames = decomposeUserArrayInto(f, name, newNames)
+ default:
+ f.Names[i] = name
+ i++
+ }
+ }
+ f.Names = f.Names[:i]
+ f.Names = append(f.Names, newNames...)
+}
+
+// decomposeUserArrayInto creates names for the element(s) of arrays referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot {
+ t := name.Type
+ if t.NumElem() == 0 {
+ // TODO(khr): Not sure what to do here. Probably nothing.
+ // Names for empty arrays aren't important.
+ return slots
+ }
+ if t.NumElem() != 1 {
+ // shouldn't get here due to CanSSA
+ f.Fatalf("array not of size 1")
+ }
+ elemName := f.fe.SplitArray(name)
+ var keep []*Value
+ for _, v := range f.NamedValues[name] {
+ if v.Op != OpArrayMake1 {
+ keep = append(keep, v)
+ continue
+ }
+ f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0])
+ }
+ if len(keep) == 0 {
+ // delete the name for the array as a whole
+ delete(f.NamedValues, name)
+ } else {
+ f.NamedValues[name] = keep
+ }
+
+ if t.Elem().IsArray() {
+ return decomposeUserArrayInto(f, elemName, slots)
+ } else if t.Elem().IsStruct() {
+ return decomposeUserStructInto(f, elemName, slots)
+ }
+
+ return append(slots, elemName)
+}
+
+// decomposeUserStructInto creates names for the field(s) of structs referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot {
+ fnames := []LocalSlot{} // slots for struct in name
+ t := name.Type
+ n := t.NumFields()
+
+ for i := 0; i < n; i++ {
+ fs := f.fe.SplitStruct(name, i)
+ fnames = append(fnames, fs)
+ // arrays and structs will be decomposed further, so
+ // there's no need to record a name
+ if !fs.Type.IsArray() && !fs.Type.IsStruct() {
+ slots = append(slots, fs)
+ }
+ }
+
+ makeOp := StructMakeOp(n)
+ var keep []*Value
+ // create named values for each struct field
+ for _, v := range f.NamedValues[name] {
+ if v.Op != makeOp {
+ keep = append(keep, v)
+ continue
+ }
+ for i := 0; i < len(fnames); i++ {
+ f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i])
+ }
+ }
+ if len(keep) == 0 {
+ // delete the name for the struct as a whole
+ delete(f.NamedValues, name)
+ } else {
+ f.NamedValues[name] = keep
+ }
+
+	// now that f.NamedValues contains values for the struct
+	// fields, recurse into nested structs
+ for i := 0; i < n; i++ {
+ if name.Type.FieldType(i).IsStruct() {
+ slots = decomposeUserStructInto(f, fnames[i], slots)
+ delete(f.NamedValues, fnames[i])
+ } else if name.Type.FieldType(i).IsArray() {
+ slots = decomposeUserArrayInto(f, fnames[i], slots)
+ delete(f.NamedValues, fnames[i])
+ }
+ }
+ return slots
+}
+
+func decomposeUserPhi(v *Value) {
+ switch {
+ case v.Type.IsStruct():
+ decomposeStructPhi(v)
+ case v.Type.IsArray():
+ decomposeArrayPhi(v)
+ }
+}
+
+// decomposeStructPhi replaces phi-of-struct with structmake(phi-for-each-field),
+// and then recursively decomposes the phis for each field.
+func decomposeStructPhi(v *Value) {
+ t := v.Type
+ n := t.NumFields()
+ var fields [MaxStruct]*Value
+ for i := 0; i < n; i++ {
+ fields[i] = v.Block.NewValue0(v.Pos, OpPhi, t.FieldType(i))
+ }
+ for _, a := range v.Args {
+ for i := 0; i < n; i++ {
+ fields[i].AddArg(a.Block.NewValue1I(v.Pos, OpStructSelect, t.FieldType(i), int64(i), a))
+ }
+ }
+ v.reset(StructMakeOp(n))
+ v.AddArgs(fields[:n]...)
+
+ // Recursively decompose phis for each field.
+ for _, f := range fields[:n] {
+ decomposeUserPhi(f)
+ }
+}
+
+// decomposeArrayPhi replaces phi-of-array with arraymake(phi-of-array-element),
+// and then recursively decomposes the element phi.
+func decomposeArrayPhi(v *Value) {
+ t := v.Type
+ if t.NumElem() == 0 {
+ v.reset(OpArrayMake0)
+ return
+ }
+ if t.NumElem() != 1 {
+ v.Fatalf("SSAable array must have no more than 1 element")
+ }
+ elem := v.Block.NewValue0(v.Pos, OpPhi, t.Elem())
+ for _, a := range v.Args {
+ elem.AddArg(a.Block.NewValue1I(v.Pos, OpArraySelect, t.Elem(), 0, a))
+ }
+ v.reset(OpArrayMake1)
+ v.AddArg(elem)
+
+ // Recursively decompose elem phi.
+ decomposeUserPhi(elem)
+}
+
+// MaxStruct is the maximum number of fields a struct
+// can have and still be SSAable.
+const MaxStruct = 4
+
+// StructMakeOp returns the opcode to construct a struct with the
+// given number of fields.
+func StructMakeOp(nf int) Op {
+ switch nf {
+ case 0:
+ return OpStructMake0
+ case 1:
+ return OpStructMake1
+ case 2:
+ return OpStructMake2
+ case 3:
+ return OpStructMake3
+ case 4:
+ return OpStructMake4
+ }
+ panic("too many fields in an SSAable struct")
+}
+
+type namedVal struct {
+ locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
+}
+
+// deleteNamedVals removes particular values with debugger names from f's naming data structures
+func deleteNamedVals(f *Func, toDelete []namedVal) {
+	// Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
+ sort.Slice(toDelete, func(i, j int) bool {
+ if toDelete[i].locIndex != toDelete[j].locIndex {
+ return toDelete[i].locIndex > toDelete[j].locIndex
+ }
+		return toDelete[i].valIndex > toDelete[j].valIndex
+	})
+
+ // Get rid of obsolete names
+ for _, d := range toDelete {
+ loc := f.Names[d.locIndex]
+ vals := f.NamedValues[loc]
+ l := len(vals) - 1
+ if l > 0 {
+ vals[d.valIndex] = vals[l]
+ f.NamedValues[loc] = vals[:l]
+ } else {
+ delete(f.NamedValues, loc)
+ l = len(f.Names) - 1
+ f.Names[d.locIndex] = f.Names[l]
+ f.Names = f.Names[:l]
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go
new file mode 100644
index 0000000..f31e7df
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom.go
@@ -0,0 +1,302 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file contains code to compute the dominator tree
+// of a control-flow graph.
+
+// postorder computes a postorder traversal ordering for the
+// basic blocks in f. Unreachable blocks will not appear.
+func postorder(f *Func) []*Block {
+ return postorderWithNumbering(f, nil)
+}
+
+type blockAndIndex struct {
+ b *Block
+ index int // index is the number of successor edges of b that have already been explored.
+}
+
+// postorderWithNumbering provides a DFS postordering.
+// This seems to make loop-finding more robust.
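+// If ponums is not nil, it is filled in with each block's postorder number,
+// indexed by block ID.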
+func postorderWithNumbering(f *Func, ponums []int32) []*Block {
+ seen := make([]bool, f.NumBlocks())
+
+ // result ordering
+ order := make([]*Block, 0, len(f.Blocks))
+
+ // stack of blocks and next child to visit
+ // A constant bound allows this to be stack-allocated. 32 is
+ // enough to cover almost every postorderWithNumbering call.
+ s := make([]blockAndIndex, 0, 32)
+ s = append(s, blockAndIndex{b: f.Entry})
+ seen[f.Entry.ID] = true
+ for len(s) > 0 {
+ tos := len(s) - 1
+ x := s[tos]
+ b := x.b
+ if i := x.index; i < len(b.Succs) {
+ s[tos].index++
+ bb := b.Succs[i].Block()
+ if !seen[bb.ID] {
+ seen[bb.ID] = true
+ s = append(s, blockAndIndex{b: bb})
+ }
+ continue
+ }
+ s = s[:tos]
+ if ponums != nil {
+ ponums[b.ID] = int32(len(order))
+ }
+ order = append(order, b)
+ }
+ return order
+}
+
+type linkedBlocks func(*Block) []Edge
+
+const nscratchslices = 7
+
+// experimentally, functions with 512 or fewer blocks account
+// for 75% of memory (size) allocation for dominator computation
+// in make.bash.
+const minscratchblocks = 512
+
+func (cache *Cache) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) {
+ tot := maxBlockID * nscratchslices
+ scratch := cache.domblockstore
+ if len(scratch) < tot {
+		// req = max(1.5*tot, nscratchslices*minscratchblocks)
+ // 50% padding allows for graph growth in later phases.
+ req := (tot * 3) >> 1
+ if req < nscratchslices*minscratchblocks {
+ req = nscratchslices * minscratchblocks
+ }
+ scratch = make([]ID, req)
+ cache.domblockstore = scratch
+ } else {
+ // Clear as much of scratch as we will (re)use
+ scratch = scratch[0:tot]
+ for i := range scratch {
+ scratch[i] = 0
+ }
+ }
+
+ a = scratch[0*maxBlockID : 1*maxBlockID]
+ b = scratch[1*maxBlockID : 2*maxBlockID]
+ c = scratch[2*maxBlockID : 3*maxBlockID]
+ d = scratch[3*maxBlockID : 4*maxBlockID]
+ e = scratch[4*maxBlockID : 5*maxBlockID]
+ f = scratch[5*maxBlockID : 6*maxBlockID]
+ g = scratch[6*maxBlockID : 7*maxBlockID]
+
+ return
+}
+
+func dominators(f *Func) []*Block {
+ preds := func(b *Block) []Edge { return b.Preds }
+ succs := func(b *Block) []Edge { return b.Succs }
+
+	// TODO: benchmark and try to find criteria for swapping between
+ // dominatorsSimple and dominatorsLT
+ return f.dominatorsLTOrig(f.Entry, preds, succs)
+}
+
+// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at
+// entry and using predFn/succFn to find predecessors/successors to allow
+// computing both dominator and post-dominator trees.
+func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block {
+ // Adapted directly from the original TOPLAS article's "simple" algorithm
+
+ maxBlockID := entry.Func.NumBlocks()
+ semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Cache.scratchBlocksForDom(maxBlockID)
+
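+	// Roughly: semi[w] starts as w's DFS number and ends as the DFS number of
+	// w's semidominator; vertex maps DFS numbers back to block IDs; parent is
+	// the DFS tree parent; ancestor and label implement eval/link with path
+	// compression; bucketHead/bucketLink form per-vertex linked-list buckets.
+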
+ // This version uses integers for most of the computation,
+ // to make the work arrays smaller and pointer-free.
+ // fromID translates from ID to *Block where that is needed.
+ fromID := make([]*Block, maxBlockID)
+ for _, v := range f.Blocks {
+ fromID[v.ID] = v
+ }
+ idom := make([]*Block, maxBlockID)
+
+ // Step 1. Carry out a depth first search of the problem graph. Number
+ // the vertices from 1 to n as they are reached during the search.
+ n := f.dfsOrig(entry, succFn, semi, vertex, label, parent)
+
+ for i := n; i >= 2; i-- {
+ w := vertex[i]
+
+ // step2 in TOPLAS paper
+ for _, e := range predFn(fromID[w]) {
+ v := e.b
+ if semi[v.ID] == 0 {
+ // skip unreachable predecessor
+ // not in original, but we're using existing pred instead of building one.
+ continue
+ }
+ u := evalOrig(v.ID, ancestor, semi, label)
+ if semi[u] < semi[w] {
+ semi[w] = semi[u]
+ }
+ }
+
+ // add w to bucket[vertex[semi[w]]]
+ // implement bucket as a linked list implemented
+ // in a pair of arrays.
+ vsw := vertex[semi[w]]
+ bucketLink[w] = bucketHead[vsw]
+ bucketHead[vsw] = w
+
+ linkOrig(parent[w], w, ancestor)
+
+ // step3 in TOPLAS paper
+ for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] {
+ u := evalOrig(v, ancestor, semi, label)
+ if semi[u] < semi[v] {
+ idom[v] = fromID[u]
+ } else {
+ idom[v] = fromID[parent[w]]
+ }
+ }
+ }
+	// step 4 in TOPLAS paper
+ for i := ID(2); i <= n; i++ {
+ w := vertex[i]
+ if idom[w].ID != vertex[semi[w]] {
+ idom[w] = idom[idom[w].ID]
+ }
+ }
+
+ return idom
+}
+
+// dfsOrig performs a depth first search over the blocks starting at the entry block
+// (in arbitrary order). This is a de-recursed version of dfs from the
+// original Tarjan-Lengauer TOPLAS article. It's important to return the
+// same values for parent as the original algorithm.
+func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID {
+ n := ID(0)
+ s := make([]*Block, 0, 256)
+ s = append(s, entry)
+
+ for len(s) > 0 {
+ v := s[len(s)-1]
+ s = s[:len(s)-1]
+ // recursing on v
+
+ if semi[v.ID] != 0 {
+ continue // already visited
+ }
+ n++
+ semi[v.ID] = n
+ vertex[n] = v.ID
+ label[v.ID] = v.ID
+ // ancestor[v] already zero
+ for _, e := range succFn(v) {
+ w := e.b
+ // if it has a dfnum, we've already visited it
+ if semi[w.ID] == 0 {
+ // yes, w can be pushed multiple times.
+ s = append(s, w)
+ parent[w.ID] = v.ID // keep overwriting this till it is visited.
+ }
+ }
+ }
+ return n
+}
+
+// compressOrig is the "simple" compress function from the LT paper.
+func compressOrig(v ID, ancestor, semi, label []ID) {
+ if ancestor[ancestor[v]] != 0 {
+ compressOrig(ancestor[v], ancestor, semi, label)
+ if semi[label[ancestor[v]]] < semi[label[v]] {
+ label[v] = label[ancestor[v]]
+ }
+ ancestor[v] = ancestor[ancestor[v]]
+ }
+}
+
+// evalOrig is the "simple" eval function from the LT paper.
+func evalOrig(v ID, ancestor, semi, label []ID) ID {
+ if ancestor[v] == 0 {
+ return v
+ }
+ compressOrig(v, ancestor, semi, label)
+ return label[v]
+}
+
+func linkOrig(v, w ID, ancestor []ID) {
+ ancestor[w] = v
+}
+
+// dominatorsSimple computes the dominator tree for f. It returns a slice
+// which maps block ID to the immediate dominator of that block.
+// Unreachable blocks map to nil. The entry block maps to nil.
+func dominatorsSimple(f *Func) []*Block {
+ // A simple algorithm for now
+ // Cooper, Harvey, Kennedy
+ idom := make([]*Block, f.NumBlocks())
+
+ // Compute postorder walk
+ post := f.postorder()
+
+ // Make map from block id to order index (for intersect call)
+ postnum := make([]int, f.NumBlocks())
+ for i, b := range post {
+ postnum[b.ID] = i
+ }
+
+ // Make the entry block a self-loop
+ idom[f.Entry.ID] = f.Entry
+ if postnum[f.Entry.ID] != len(post)-1 {
+ f.Fatalf("entry block %v not last in postorder", f.Entry)
+ }
+
+ // Compute relaxation of idom entries
+ for {
+ changed := false
+
+ for i := len(post) - 2; i >= 0; i-- {
+ b := post[i]
+ var d *Block
+ for _, e := range b.Preds {
+ p := e.b
+ if idom[p.ID] == nil {
+ continue
+ }
+ if d == nil {
+ d = p
+ continue
+ }
+ d = intersect(d, p, postnum, idom)
+ }
+ if d != idom[b.ID] {
+ idom[b.ID] = d
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Set idom of entry block to nil instead of itself.
+ idom[f.Entry.ID] = nil
+ return idom
+}
+
+// intersect finds the closest common dominator of b and c.
+// It requires a postorder numbering of all the blocks.
+func intersect(b, c *Block, postnum []int, idom []*Block) *Block {
+ // TODO: This loop is O(n^2). It used to be used in nilcheck,
+ // see BenchmarkNilCheckDeep*.
+ for b != c {
+ if postnum[b.ID] < postnum[c.ID] {
+ b = idom[b.ID]
+ } else {
+ c = idom[c.ID]
+ }
+ }
+ return b
+}
diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go
new file mode 100644
index 0000000..fa51718
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom_test.go
@@ -0,0 +1,608 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) }
+func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) }
+func BenchmarkDominatorsManyPred(b *testing.B) { benchmarkDominators(b, 10000, genManyPred) }
+func BenchmarkDominatorsMaxPred(b *testing.B) { benchmarkDominators(b, 10000, genMaxPred) }
+func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) }
+
+type blockGen func(size int) []bloc
+
+// genLinear creates an array of blocks that succeed one another
+// b_n -> [b_n+1].
+func genLinear(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genFwdBack creates an array of blocks that alternate between
+// b_n -> [b_n+1], b_n -> [b_n+1, b_n-1], and b_n -> [b_n+1, b_n+2].
+func genFwdBack(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ switch i % 2 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i+2))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i-1))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genManyPred creates an array of blocks where 1/3rd have a successor of the
+// first block, 1/3rd the last block, and the remaining third are plain.
+func genManyPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ // We want predecessor lists to be long, so 2/3rds of the blocks have a
+ // successor of the first or last block.
+ for i := 0; i < size; i++ {
+ switch i % 3 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(i+1))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(0))))
+ case 2:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(size))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPred maximizes the size of the 'exit' predecessor list.
+func genMaxPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPredValue is identical to genMaxPred but contains an
+// additional value.
+func genMaxPredValue(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// sink for benchmark
+var domBenchRes []*Block
+
+func benchmarkDominators(b *testing.B, size int, bg blockGen) {
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ domBenchRes = dominators(fun.f)
+ }
+}
+
+type domFunc func(f *Func) []*Block
+
+// verifyDominators verifies that the dominators of fut (function under test)
+// as determined by domFn, match the map node->dominator
+func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+
+ calcDom := domFn(fut.f)
+
+ for n, d := range doms {
+ nblk, ok := fut.blocks[n]
+ if !ok {
+ t.Errorf("invalid block name %s", n)
+ }
+ dblk, ok := fut.blocks[d]
+ if !ok {
+ t.Errorf("invalid block name %s", d)
+ }
+
+ domNode := calcDom[nblk.ID]
+ switch {
+ case calcDom[nblk.ID] == dblk:
+ calcDom[nblk.ID] = nil
+ continue
+ case calcDom[nblk.ID] != dblk:
+ t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode])
+ default:
+ t.Fatal("unexpected dominator condition")
+ }
+ }
+
+ for id, d := range calcDom {
+ // If nil, we've already verified it
+ if d == nil {
+ continue
+ }
+ for _, b := range fut.blocks {
+ if int(b.ID) == id {
+ t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b])
+ }
+ }
+ }
+
+}
+
+func TestDominatorsSingleBlock(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+
+ doms := map[string]string{}
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsSimple(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsMultPredFwd(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsDeadCode(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 0, nil),
+ If("p", "b3", "b5")),
+ Bloc("b2", Exit("mem")),
+ Bloc("b3", Goto("b2")),
+ Bloc("b4", Goto("b2")),
+ Bloc("b5", Goto("b2")))
+
+ doms := map[string]string{
+ "b2": "entry",
+ "b3": "entry",
+ "b5": "entry",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPredRev(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Goto("first")),
+ Bloc("first",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ If("p", "b", "first")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "exit", "b")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "first": "entry",
+ "a": "first",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPred(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "b", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "entry",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestInfiniteLoop(t *testing.T) {
+ c := testConfig(t)
+ // note lack of an exit block
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("a")))
+
+ CheckFunc(fun.f)
+ doms := map[string]string{"a": "entry",
+ "b": "a"}
+ verifyDominators(t, fun, dominators, doms)
+}
+
+func TestDomTricky(t *testing.T) {
+ doms := map[string]string{
+ "4": "1",
+ "2": "4",
+ "5": "4",
+ "11": "4",
+ "15": "4", // the incorrect answer is "5"
+ "10": "15",
+ "19": "15",
+ }
+
+ if4 := [2]string{"2", "5"}
+ if5 := [2]string{"15", "11"}
+ if15 := [2]string{"19", "10"}
+
+ for i := 0; i < 8; i++ {
+		a := 1 & i        // bit 0 of i selects the first branch ordering
+		b := 1 & (i >> 1) // bit 1 of i (parenthesized: & and >> share precedence in Go)
+		c := 1 & (i >> 2) // bit 2 of i
+
+ cfg := testConfig(t)
+ fun := cfg.Fun("1",
+ Bloc("1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("4")),
+ Bloc("2",
+ Goto("11")),
+ Bloc("4",
+ If("p", if4[a], if4[1-a])), // 2, 5
+ Bloc("5",
+ If("p", if5[b], if5[1-b])), //15, 11
+ Bloc("10",
+ Exit("mem")),
+ Bloc("11",
+ Goto("15")),
+ Bloc("15",
+ If("p", if15[c], if15[1-c])), //19, 10
+ Bloc("19",
+ Goto("10")))
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+ }
+}
+
+// generateDominatorMap uses dominatorsSimple to obtain a
+// reference dominator tree for testing faster algorithms.
+func generateDominatorMap(fut fun) map[string]string {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+ referenceDom := dominatorsSimple(fut.f)
+ doms := make(map[string]string)
+ for _, b := range fut.f.Blocks {
+ if d := referenceDom[b.ID]; d != nil {
+ doms[blockNames[b]] = blockNames[d]
+ }
+ }
+ return doms
+}
+
+func TestDominatorsPostTrickyA(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyB(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyC(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyD(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyE(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyF(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyG(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyH(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b15", "b14")
+}
+
+func testDominatorsPostTricky(t *testing.T, b7then, b7else, b12then, b12else, b13then, b13else string) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "b3", "b2")),
+ Bloc("b3",
+ If("p", "b5", "b6")),
+ Bloc("b5",
+ Goto("b7")),
+ Bloc("b7",
+ If("p", b7then, b7else)),
+ Bloc("b8",
+ Goto("b13")),
+ Bloc("b13",
+ If("p", b13then, b13else)),
+ Bloc("b14",
+ Goto("b10")),
+ Bloc("b15",
+ Goto("b16")),
+ Bloc("b16",
+ Goto("b9")),
+ Bloc("b9",
+ Goto("b7")),
+ Bloc("b11",
+ Goto("b12")),
+ Bloc("b12",
+ If("p", b12then, b12else)),
+ Bloc("b10",
+ Goto("b6")),
+ Bloc("b6",
+ Goto("b17")),
+ Bloc("b17",
+ Goto("b18")),
+ Bloc("b18",
+ If("p", "b22", "b19")),
+ Bloc("b22",
+ Goto("b23")),
+ Bloc("b23",
+ If("p", "b21", "b19")),
+ Bloc("b19",
+ If("p", "b24", "b25")),
+ Bloc("b24",
+ Goto("b26")),
+ Bloc("b26",
+ Goto("b25")),
+ Bloc("b25",
+ If("p", "b27", "b29")),
+ Bloc("b27",
+ Goto("b30")),
+ Bloc("b30",
+ Goto("b28")),
+ Bloc("b29",
+ Goto("b31")),
+ Bloc("b31",
+ Goto("b28")),
+ Bloc("b28",
+ If("p", "b32", "b33")),
+ Bloc("b32",
+ Goto("b21")),
+ Bloc("b21",
+ Goto("b47")),
+ Bloc("b47",
+ If("p", "b45", "b46")),
+ Bloc("b45",
+ Goto("b48")),
+ Bloc("b48",
+ Goto("b49")),
+ Bloc("b49",
+ If("p", "b50", "b51")),
+ Bloc("b50",
+ Goto("b52")),
+ Bloc("b52",
+ Goto("b53")),
+ Bloc("b53",
+ Goto("b51")),
+ Bloc("b51",
+ Goto("b54")),
+ Bloc("b54",
+ Goto("b46")),
+ Bloc("b46",
+ Exit("mem")),
+ Bloc("b33",
+ Goto("b34")),
+ Bloc("b34",
+ Goto("b37")),
+ Bloc("b37",
+ If("p", "b35", "b36")),
+ Bloc("b35",
+ Goto("b38")),
+ Bloc("b38",
+ Goto("b39")),
+ Bloc("b39",
+ If("p", "b40", "b41")),
+ Bloc("b40",
+ Goto("b42")),
+ Bloc("b42",
+ Goto("b43")),
+ Bloc("b43",
+ Goto("b41")),
+ Bloc("b41",
+ Goto("b44")),
+ Bloc("b44",
+ Goto("b36")),
+ Bloc("b36",
+ Goto("b20")),
+ Bloc("b20",
+ Goto("b18")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b4",
+ Exit("mem")))
+ CheckFunc(fun.f)
+ doms := generateDominatorMap(fun)
+ verifyDominators(t, fun, dominators, doms)
+}
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
new file mode 100644
index 0000000..679ee8a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -0,0 +1,975 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+type selKey struct {
+ from *Value
+ offset int64
+ size int64
+ typ *types.Type
+}
+
+type offsetKey struct {
+ from *Value
+ offset int64
+ pt *types.Type
+}
+
+// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
+// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
+// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
+// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
+// TODO results should not be lowered until this phase.
+func expandCalls(f *Func) {
+ // Calls that need lowering have some number of inputs, including a memory input,
+ // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+ // With the current ABI those inputs need to be converted into stores to memory,
+ // rethreading the call's memory input to the first store, with the new call receiving the last store's memory output.
+
+ // With the current ABI, the outputs need to be converted to loads, which will all use the call's
+ // memory output as their input.
+ if !LateCallExpansionEnabledWithin(f) {
+ return
+ }
+ debug := f.pass.debug > 0
+
+ if debug {
+ fmt.Printf("\nexpandCalls(%s)\n", f.Name)
+ }
+
+ canSSAType := f.fe.CanSSA
+ regSize := f.Config.RegSize
+ sp, _ := f.spSb()
+ typ := &f.Config.Types
+ ptrSize := f.Config.PtrSize
+
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ var hiOffset, lowOffset int64
+ if f.Config.BigEndian {
+ lowOffset = 4
+ } else {
+ hiOffset = 4
+ }
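+ // That is, on a little-endian target the low 32-bit half of a 64-bit integer lives at
+ // offset 0 and the high half at offset 4; on a big-endian target the two halves swap.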
+
+ namedSelects := make(map[*Value][]namedVal)
+
+ sdom := f.Sdom()
+
+ common := make(map[selKey]*Value)
+
+ // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+ // that has no 64-bit integer registers.
+ intPairTypes := func(et types.EType) (tHi, tLo *types.Type) {
+ tHi = typ.UInt32
+ if et == types.TINT64 {
+ tHi = typ.Int32
+ }
+ tLo = typ.UInt32
+ return
+ }
+
+ // isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
+ // that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
+ // so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
+ // integer on 32-bit).
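+ // For example, on a 64-bit target a small struct such as struct{ a, b int32 } or a string
+ // qualifies, while a plain int64 does not; on a 32-bit target int64 qualifies as well.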
+ isAlreadyExpandedAggregateType := func(t *types.Type) bool {
+ if !canSSAType(t) {
+ return false
+ }
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
+ t.Size() > regSize && t.IsInteger()
+ }
+
+ offsets := make(map[offsetKey]*Value)
+
+ // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+ // TODO should also optimize offsets from SB?
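+ // A schematic example (value numbers and offsets illustrative only): given
+ //   v1 = OffPtr <*T> [16] v0
+ //   v2 = OffPtr <*U> [8] v1
+ // offsetFrom(v2, 8, pt) folds the chain into a single OffPtr <pt> [32] v0,
+ // or, when v0 is SP, into a cached SP-relative offset via f.ConstOffPtrSP.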
+ offsetFrom := func(from *Value, offset int64, pt *types.Type) *Value {
+ if offset == 0 && from.Type == pt { // this is not actually likely
+ return from
+ }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == sp {
+ return f.ConstOffPtrSP(pt, offset, sp)
+ }
+ key := offsetKey{from, offset, pt}
+ v := offsets[key]
+ if v != nil {
+ return v
+ }
+ v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+ offsets[key] = v
+ return v
+ }
+
+ // splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
+ splitSlots := func(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
+ var locs []LocalSlot
+ for i := range ls {
+ locs = append(locs, f.fe.SplitSlot(&ls[i], sfx, offset, ty))
+ }
+ return locs
+ }
+
+ // removeTrivialWrapperTypes unwraps layers of
+ // struct { singleField SomeType } and [1]SomeType
+ // until a non-wrapper type is reached. This is useful
+ // for working with assignments to/from interface data
+ // fields (either second operand to OpIMake or OpIData)
+ // where the wrapping or type conversion can be elided
+ // because of type conversions/assertions in source code
+ // that do not appear in SSA.
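+ // For instance, a (hypothetical) wrapper type struct{ x [1]float64 } unwraps first to
+ // [1]float64 and then to float64.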
+ removeTrivialWrapperTypes := func(t *types.Type) *types.Type {
+ for {
+ if t.IsStruct() && t.NumFields() == 1 {
+ t = t.Field(0).Type
+ continue
+ }
+ if t.IsArray() && t.NumElem() == 1 {
+ t = t.Elem()
+ continue
+ }
+ break
+ }
+ return t
+ }
+
+ // Calls that need lowering have some number of inputs, including a memory input,
+ // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+ // With the current ABI those inputs need to be converted into stores to memory,
+ // rethreading the call's memory input to the first store, with the new call receiving the last store's memory output.
+
+ // With the current ABI, the outputs need to be converted to loads, which will all use the call's
+ // memory output as their input.
+
+ // rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
+ // through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
+ // end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
+ // The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
+ // accumulates the offset.
+ // It emits the code necessary to implement the leaf select operation that leads to the root.
+ //
+ // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
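+ //
+ // Schematically (offsets and value numbers illustrative only), a result selection such as
+ //   v2 = SelectN <int> [0] v1   (where v1 is a StaticLECall)
+ // becomes
+ //   v2 = Load <int> (OffPtr <*int> [off] SP) v1
+ // where off comes from the call's AuxCall.OffsetOfResult(0).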
+ var rewriteSelect func(leaf *Value, selector *Value, offset int64) []LocalSlot
+ rewriteSelect = func(leaf *Value, selector *Value, offset int64) []LocalSlot {
+ if debug {
+ fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
+ }
+ var locs []LocalSlot
+ leafType := leaf.Type
+ if len(selector.Args) > 0 {
+ w := selector.Args[0]
+ if w.Op == OpCopy {
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ selector.SetArg(0, w)
+ }
+ }
+ switch selector.Op {
+ case OpArg:
+ if !isAlreadyExpandedAggregateType(selector.Type) {
+ if leafType == selector.Type { // OpIData leads us here, sometimes.
+ leaf.copyOf(selector)
+ } else {
+ f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
+ }
+ if debug {
+ fmt.Printf("\tOpArg, break\n")
+ }
+ break
+ }
+ switch leaf.Op {
+ case OpIData, OpStructSelect, OpArraySelect:
+ leafType = removeTrivialWrapperTypes(leaf.Type)
+ }
+ aux := selector.Aux
+ auxInt := selector.AuxInt + offset
+ if leaf.Block == selector.Block {
+ leaf.reset(OpArg)
+ leaf.Aux = aux
+ leaf.AuxInt = auxInt
+ leaf.Type = leafType
+ } else {
+ w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
+ leaf.copyOf(w)
+ if debug {
+ fmt.Printf("\tnew %s\n", w.LongString())
+ }
+ }
+ for _, s := range namedSelects[selector] {
+ locs = append(locs, f.Names[s.locIndex])
+ }
+
+ case OpLoad: // We end up here because of IData of immediate structures.
+ // Failure case:
+ // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
+ // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
+ //
+ // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
+ // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
+ // b2: ← b1
+ // v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
+ // v21 (142) = SelectN <mem> [1] v20
+ // v22 (142) = SelectN <interface {}> [0] v20
+ // b15: ← b8
+ // v71 (+143) = IData <Nodes> v22 (v[Nodes])
+ // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
+ //
+ // translates (w/o the "case OpLoad:" above) to:
+ //
+ // b2: ← b1
+ // v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
+ // v23 (142) = Load <*uintptr> v19 v20
+ // v823 (142) = IsNonNil <bool> v23
+ // v67 (+143) = Load <*[]*Node> v880 v20
+ // b15: ← b8
+ // v827 (146) = StructSelect <*[]*Node> [0] v67
+ // v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
+ // v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
+ // i.e., the struct select is generated and remains in place because it is not applied to an actual structure.
+ // The OpLoad was created to load the single field of the IData.
+ // This case removes that StructSelect.
+ if leafType != selector.Type {
+ f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
+ }
+ leaf.copyOf(selector)
+ for _, s := range namedSelects[selector] {
+ locs = append(locs, f.Names[s.locIndex])
+ }
+
+ case OpSelectN:
+ // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
+ call := selector.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := selector.AuxInt
+ if which == aux.NResults() { // mem is after the results.
+ // rewrite v as a Copy of call -- the replacement call will produce a mem.
+ leaf.copyOf(call)
+ } else {
+ leafType := removeTrivialWrapperTypes(leaf.Type)
+ if canSSAType(leafType) {
+ pt := types.NewPtr(leafType)
+ off := offsetFrom(sp, offset+aux.OffsetOfResult(which), pt)
+ // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+ if leaf.Block == call.Block {
+ leaf.reset(OpLoad)
+ leaf.SetArgs2(off, call)
+ leaf.Type = leafType
+ } else {
+ w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
+ leaf.copyOf(w)
+ if debug {
+ fmt.Printf("\tnew %s\n", w.LongString())
+ }
+ }
+ for _, s := range namedSelects[selector] {
+ locs = append(locs, f.Names[s.locIndex])
+ }
+ } else {
+ f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
+ }
+ }
+
+ case OpStructSelect:
+ w := selector.Args[0]
+ var ls []LocalSlot
+ if w.Type.Etype != types.TSTRUCT { // IData artifact
+ ls = rewriteSelect(leaf, w, offset)
+ } else {
+ ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
+ if w.Op != OpIData {
+ for _, l := range ls {
+ locs = append(locs, f.fe.SplitStruct(l, int(selector.AuxInt)))
+ }
+ }
+ }
+
+ case OpArraySelect:
+ w := selector.Args[0]
+ rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
+
+ case OpInt64Hi:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset+hiOffset)
+ locs = splitSlots(ls, ".hi", hiOffset, leafType)
+
+ case OpInt64Lo:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset+lowOffset)
+ locs = splitSlots(ls, ".lo", lowOffset, leafType)
+
+ case OpStringPtr:
+ ls := rewriteSelect(leaf, selector.Args[0], offset)
+ locs = splitSlots(ls, ".ptr", 0, typ.BytePtr)
+
+ case OpSlicePtr:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset)
+ locs = splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
+
+ case OpITab:
+ w := selector.Args[0]
+ ls := rewriteSelect(leaf, w, offset)
+ sfx := ".itab"
+ if w.Type.IsEmptyInterface() {
+ sfx = ".type"
+ }
+ locs = splitSlots(ls, sfx, 0, typ.Uintptr)
+
+ case OpComplexReal:
+ ls := rewriteSelect(leaf, selector.Args[0], offset)
+ locs = splitSlots(ls, ".real", 0, leafType)
+
+ case OpComplexImag:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
+ locs = splitSlots(ls, ".imag", leafType.Width, leafType)
+
+ case OpStringLen, OpSliceLen:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
+ locs = splitSlots(ls, ".len", ptrSize, leafType)
+
+ case OpIData:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
+ locs = splitSlots(ls, ".data", ptrSize, leafType)
+
+ case OpSliceCap:
+ ls := rewriteSelect(leaf, selector.Args[0], offset+2*ptrSize)
+ locs = splitSlots(ls, ".cap", 2*ptrSize, leafType)
+
+ case OpCopy: // If it's an intermediate result, recurse
+ locs = rewriteSelect(leaf, selector.Args[0], offset)
+ for _, s := range namedSelects[selector] {
+ // this copy may have had its own name, preserve that, too.
+ locs = append(locs, f.Names[s.locIndex])
+ }
+
+ default:
+ // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
+ }
+
+ return locs
+ }
+
+ // storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed
+ // stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
+ // If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
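+ // For example (schematically), storing a string-typed argument s at offset off becomes a store
+ // of StringPtr(s) at off followed by a store of StringLen(s) at off+ptrSize.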
+ var storeArgOrLoad func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value
+
+ // decomposeArgOrLoad is a helper for storeArgOrLoad.
+ // It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
+ // passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
+ decomposeArgOrLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64,
+ decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
+ decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
+ u := source.Type
+ switch u.Etype {
+ case types.TARRAY:
+ elem := u.Elem()
+ for i := int64(0); i < u.NumElem(); i++ {
+ elemOff := i * elem.Size()
+ mem = decomposeOne(pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TSTRUCT:
+ for i := 0; i < u.NumFields(); i++ {
+ fld := u.Field(i)
+ mem = decomposeOne(pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TINT64, types.TUINT64:
+ if t.Width == regSize {
+ break
+ }
+ tHi, tLo := intPairTypes(t.Etype)
+ mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset)
+ pos = pos.WithNotStmt()
+ return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset)
+ case types.TINTER:
+ return decomposeTwo(pos, b, base, source, mem, typ.Uintptr, typ.BytePtr, source.AuxInt, offset)
+ case types.TSTRING:
+ return decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
+ case types.TCOMPLEX64:
+ return decomposeTwo(pos, b, base, source, mem, typ.Float32, typ.Float32, source.AuxInt, offset)
+ case types.TCOMPLEX128:
+ return decomposeTwo(pos, b, base, source, mem, typ.Float64, typ.Float64, source.AuxInt, offset)
+ case types.TSLICE:
+ mem = decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
+ return decomposeOne(pos, b, base, source, mem, typ.Int, source.AuxInt+2*ptrSize, offset+2*ptrSize)
+ }
+ return nil
+ }
+
+ // storeOneArg creates a decomposed (one step) arg that is then stored.
+ // pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
+ // mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
+ storeOneArg := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+ w := common[selKey{source, offArg, t.Width, t}]
+ if w == nil {
+ w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux)
+ common[selKey{source, offArg, t.Width, t}] = w
+ }
+ return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+ }
+
+ // storeOneLoad creates a decomposed (one step) load that is then stored.
+ storeOneLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+ from := offsetFrom(source.Args[0], offArg, types.NewPtr(t))
+ w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
+ return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+ }
+
+ storeTwoArg := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+ mem = storeOneArg(pos, b, base, source, mem, t1, offArg, offStore)
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneArg(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+ }
+
+ storeTwoLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+ mem = storeOneLoad(pos, b, base, source, mem, t1, offArg, offStore)
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneLoad(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+ }
+
+ storeArgOrLoad = func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value {
+ if debug {
+ fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset)
+ }
+
+ switch source.Op {
+ case OpCopy:
+ return storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset)
+
+ case OpLoad:
+ ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad)
+ if ret != nil {
+ return ret
+ }
+
+ case OpArg:
+ ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg)
+ if ret != nil {
+ return ret
+ }
+
+ case OpArrayMake0, OpStructMake0:
+ return mem
+
+ case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ mem = storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case OpArrayMake1:
+ return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
+
+ case OpInt64Make:
+ tHi, tLo := intPairTypes(t.Etype)
+ mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset)
+ pos = pos.WithNotStmt()
+ return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset)
+
+ case OpComplexMake:
+ tPart := typ.Float32
+ wPart := t.Width / 2
+ if wPart == 8 {
+ tPart = typ.Float64
+ }
+ mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset)
+ pos = pos.WithNotStmt()
+ return storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart)
+
+ case OpIMake:
+ mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.Uintptr, offset)
+ pos = pos.WithNotStmt()
+ return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.BytePtr, offset+ptrSize)
+
+ case OpStringMake:
+ mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
+
+ case OpSliceMake:
+ mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ mem = storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
+ return storeArgOrLoad(pos, b, base, source.Args[2], mem, typ.Int, offset+2*ptrSize)
+ }
+
+ // For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
+ switch t.Etype {
+ case types.TARRAY:
+ elt := t.Elem()
+ if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize {
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return storeArgOrLoad(pos, b, base, source, mem, t, offset)
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case types.TSTRUCT:
+ if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize {
+ // This peculiar test deals with accesses to immediate interface data.
+ // It works okay because everything is the same size.
+ // Example code that triggers this can be found in go/constant/value.go, function ToComplex
+ // v119 (+881) = IData <intVal> v6
+ // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
+ // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
+ // Guard against "struct{struct{*foo}}"
+ // Other rewriting phases create minor glitches when they transform IData, for instance the
+ // interface-typed Arg "x" of ToFloat in go/constant/value.go
+ // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
+ // is rewritten by decomposeArgs into
+ // v141 (858) = Arg <uintptr> {x}
+ // v139 (858) = Arg <*uint8> {x} [8]
+ // because of a type case clause on line 862 of go/constant/value.go
+ // case intVal:
+ // return itof(x)
+ // v139 is later stored as an intVal == struct{val *big.Int}, which naively requires the fields
+ // of a *uint8, which does not succeed.
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return storeArgOrLoad(pos, b, base, source, mem, t, offset)
+ }
+
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case types.TINT64, types.TUINT64:
+ if t.Width == regSize {
+ break
+ }
+ tHi, tLo := intPairTypes(t.Etype)
+ sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+lowOffset)
+
+ case types.TINTER:
+ sel := source.Block.NewValue1(pos, OpITab, typ.BytePtr, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpIData, typ.BytePtr, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset+ptrSize)
+
+ case types.TSTRING:
+ sel := source.Block.NewValue1(pos, OpStringPtr, typ.BytePtr, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpStringLen, typ.Int, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
+
+ case types.TSLICE:
+ et := types.NewPtr(t.Elem())
+ sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, et, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpSliceLen, typ.Int, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
+ sel = source.Block.NewValue1(pos, OpSliceCap, typ.Int, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+2*ptrSize)
+
+ case types.TCOMPLEX64:
+ sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float32, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float32, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset+4)
+
+ case types.TCOMPLEX128:
+ sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float64, source)
+ mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float64, source)
+ return storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset+8)
+ }
+
+ dst := offsetFrom(base, offset, types.NewPtr(t))
+ x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
+ if debug {
+ fmt.Printf("\t\tstoreArg returns %s\n", x.LongString())
+ }
+ return x
+ }
+
+ // rewriteArgs removes all the Args from a call and converts the call args into appropriate
+ // stores (or later, register movement). Extra args for interface and closure calls are not converted
+ // to stores, but are removed from the call; the caller re-attaches them.
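+ // Schematically (illustrative only), a StaticLECall with value args (a1, a2) and memory m is
+ // rewritten so that a1 and a2 are stored at their argument offsets relative to SP (threaded
+ // through m), and the call is left with only the final store's memory as its argument.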
+ rewriteArgs := func(v *Value, firstArg int) *Value {
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ pos := v.Pos.WithNotStmt()
+ m0 := v.Args[len(v.Args)-1]
+ mem := m0
+ for i, a := range v.Args {
+ if i < firstArg {
+ continue
+ }
+ if a == m0 { // mem is last.
+ break
+ }
+ auxI := int64(i - firstArg)
+ if a.Op == OpDereference {
+ if a.MemoryArg() != m0 {
+ f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
+ }
+ // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
+ // TODO this will be more complicated with registers in the picture.
+ source := a.Args[0]
+ dst := f.ConstOffPtrSP(source.Type, aux.OffsetOfArg(auxI), sp)
+ if a.Uses == 1 && a.Block == v.Block {
+ a.reset(OpMove)
+ a.Pos = pos
+ a.Type = types.TypeMem
+ a.Aux = aux.TypeOfArg(auxI)
+ a.AuxInt = aux.SizeOfArg(auxI)
+ a.SetArgs3(dst, source, mem)
+ mem = a
+ } else {
+ mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, source, mem)
+ mem.AuxInt = aux.SizeOfArg(auxI)
+ }
+ } else {
+ if debug {
+ fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+ }
+ mem = storeArgOrLoad(pos, v.Block, sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+ }
+ }
+ v.resetArgs()
+ return mem
+ }
+
+ // TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
+
+ // Step 0: rewrite the calls to convert incoming args to stores.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticLECall:
+ mem := rewriteArgs(v, 0)
+ v.SetArgs1(mem)
+ case OpClosureLECall:
+ code := v.Args[0]
+ context := v.Args[1]
+ mem := rewriteArgs(v, 2)
+ v.SetArgs3(code, context, mem)
+ case OpInterLECall:
+ code := v.Args[0]
+ mem := rewriteArgs(v, 1)
+ v.SetArgs2(code, mem)
+ }
+ }
+ }
+
+ for i, name := range f.Names {
+ t := name.Type
+ if isAlreadyExpandedAggregateType(t) {
+ for j, v := range f.NamedValues[name] {
+ if v.Op == OpSelectN || v.Op == OpArg && isAlreadyExpandedAggregateType(v.Type) {
+ ns := namedSelects[v]
+ namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+ }
+ }
+ }
+ }
+
+ // Step 1: any stores of aggregates remaining are believed to be sourced from call results or args.
+ // Decompose those stores into a series of smaller stores, adding selection ops as necessary.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpStore {
+ t := v.Aux.(*types.Type)
+ source := v.Args[1]
+ tSrc := source.Type
+ iAEATt := isAlreadyExpandedAggregateType(t)
+
+ if !iAEATt {
+ // guarding against store immediate struct into interface data field -- store type is *uint8
+ // TODO can this happen recursively?
+ iAEATt = isAlreadyExpandedAggregateType(tSrc)
+ if iAEATt {
+ t = tSrc
+ }
+ }
+ if iAEATt {
+ if debug {
+ fmt.Printf("Splitting store %s\n", v.LongString())
+ }
+ dst, mem := v.Args[0], v.Args[2]
+ mem = storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0)
+ v.copyOf(mem)
+ }
+ }
+ }
+ }
+
+ val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.
+
+ // Step 2: transform or accumulate selection operations for rewrite in topological order.
+ //
+ // Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
+ // the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures).
+ //
+ // Any select-for-addressing applied to call results can be transformed directly.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // Accumulate chains of selectors for processing in topological order
+ switch v.Op {
+ case OpStructSelect, OpArraySelect,
+ OpIData, OpITab,
+ OpStringPtr, OpStringLen,
+ OpSlicePtr, OpSliceLen, OpSliceCap,
+ OpComplexReal, OpComplexImag,
+ OpInt64Hi, OpInt64Lo:
+ w := v.Args[0]
+ switch w.Op {
+ case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
+ val2Preds[w] += 1
+ if debug {
+ fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
+ }
+ }
+ fallthrough
+
+ case OpSelectN:
+ if _, ok := val2Preds[v]; !ok {
+ val2Preds[v] = 0
+ if debug {
+ fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
+ }
+ }
+
+ case OpArg:
+ if !isAlreadyExpandedAggregateType(v.Type) {
+ continue
+ }
+ if _, ok := val2Preds[v]; !ok {
+ val2Preds[v] = 0
+ if debug {
+ fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
+ }
+ }
+
+ case OpSelectNAddr:
+ // Do these directly, there are no chains of selectors.
+ call := v.Args[0]
+ which := v.AuxInt
+ aux := call.Aux.(*AuxCall)
+ pt := v.Type
+ off := offsetFrom(sp, aux.OffsetOfResult(which), pt)
+ v.copyOf(off)
+ }
+ }
+ }
+
+ // Step 3: Compute topological order of selectors,
+ // then process it in reverse to eliminate duplicates,
+ // then forwards to rewrite selectors.
+ //
+ // All chains of selectors end up in the same block as the call.
+
+ // Compilation must be deterministic, so sort after extracting first zeroes from map.
+ // Sorting allows dominators-last order within each batch,
+ // so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
+ var toProcess []*Value
+ less := func(i, j int) bool {
+ vi, vj := toProcess[i], toProcess[j]
+ bi, bj := vi.Block, vj.Block
+ if bi == bj {
+ return vi.ID < vj.ID
+ }
+ return sdom.domorder(bi) > sdom.domorder(bj) // reverse the order to put dominators last.
+ }
+
+ // Accumulate order in allOrdered
+ var allOrdered []*Value
+ for v, n := range val2Preds {
+ if n == 0 {
+ allOrdered = append(allOrdered, v)
+ }
+ }
+ last := 0 // allOrdered[0:last] has been top-sorted and processed
+ for len(val2Preds) > 0 {
+ toProcess = allOrdered[last:]
+ last = len(allOrdered)
+ sort.SliceStable(toProcess, less)
+ for _, v := range toProcess {
+ delete(val2Preds, v)
+ if v.Op == OpArg {
+ continue // no Args[0], hence done.
+ }
+ w := v.Args[0]
+ n, ok := val2Preds[w]
+ if !ok {
+ continue
+ }
+ if n == 1 {
+ allOrdered = append(allOrdered, w)
+ delete(val2Preds, w)
+ continue
+ }
+ val2Preds[w] = n - 1
+ }
+ }
+
+ common = make(map[selKey]*Value)
+ // Rewrite duplicate selectors as copies where possible.
+ for i := len(allOrdered) - 1; i >= 0; i-- {
+ v := allOrdered[i]
+ if v.Op == OpArg {
+ continue
+ }
+ w := v.Args[0]
+ if w.Op == OpCopy {
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ v.SetArg(0, w)
+ }
+ typ := v.Type
+ if typ.IsMemory() {
+ continue // handled elsewhere, not an indexable result
+ }
+ size := typ.Width
+ offset := int64(0)
+ switch v.Op {
+ case OpStructSelect:
+ if w.Type.Etype == types.TSTRUCT {
+ offset = w.Type.FieldOff(int(v.AuxInt))
+ } else { // Immediate interface data artifact, offset is zero.
+ f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
+ }
+ case OpArraySelect:
+ offset = size * v.AuxInt
+ case OpSelectN:
+ offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt)
+ case OpInt64Hi:
+ offset = hiOffset
+ case OpInt64Lo:
+ offset = lowOffset
+ case OpStringLen, OpSliceLen, OpIData:
+ offset = ptrSize
+ case OpSliceCap:
+ offset = 2 * ptrSize
+ case OpComplexImag:
+ offset = size
+ }
+ sk := selKey{from: w, size: size, offset: offset, typ: typ}
+ dupe := common[sk]
+ if dupe == nil {
+ common[sk] = v
+ } else if sdom.IsAncestorEq(dupe.Block, v.Block) {
+ v.copyOf(dupe)
+ } else {
+ // Because values are processed in dominator order, the old common[sk] will never dominate after a miss is seen.
+ // Installing the new value might match some future values.
+ common[sk] = v
+ }
+ }
+
+ // Indices of entries in f.Names that need to be deleted.
+ var toDelete []namedVal
+
+ // Rewrite selectors.
+ for i, v := range allOrdered {
+ if debug {
+ b := v.Block
+ fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
+ }
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ continue
+ }
+ if v.Op == OpCopy {
+ continue
+ }
+ locs := rewriteSelect(v, v, 0)
+ // Install new names.
+ if v.Type.IsMemory() {
+ continue
+ }
+ // Leaf types may have debug locations
+ if !isAlreadyExpandedAggregateType(v.Type) {
+ for _, l := range locs {
+ f.NamedValues[l] = append(f.NamedValues[l], v)
+ }
+ f.Names = append(f.Names, locs...)
+ continue
+ }
+ // Non-leaf types that had debug locations need to lose them.
+ if ns, ok := namedSelects[v]; ok {
+ toDelete = append(toDelete, ns...)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+
+ // Step 4: rewrite the calls themselves, correcting the type
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticLECall:
+ v.Op = OpStaticCall
+ v.Type = types.TypeMem
+ case OpClosureLECall:
+ v.Op = OpClosureCall
+ v.Type = types.TypeMem
+ case OpInterLECall:
+ v.Op = OpInterCall
+ v.Type = types.TypeMem
+ }
+ }
+ }
+
+ // Step 5: elide any copies introduced.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ for a.Uses == 0 {
+ b := a.Args[0]
+ a.reset(OpInvalid)
+ a = b
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
new file mode 100644
index 0000000..b4c3e5c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+ "fmt"
+ "testing"
+)
+
+var CheckFunc = checkFunc
+var Opt = opt
+var Deadcode = deadcode
+var Copyelim = copyelim
+
+var testCtxts = map[string]*obj.Link{
+ "amd64": obj.Linknew(&x86.Linkamd64),
+ "s390x": obj.Linknew(&s390x.Links390x),
+ "arm64": obj.Linknew(&arm64.Linkarm64),
+}
+
+func testConfig(tb testing.TB) *Conf { return testConfigArch(tb, "amd64") }
+func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
+func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }
+
+func testConfigArch(tb testing.TB, arch string) *Conf {
+ ctxt, ok := testCtxts[arch]
+ if !ok {
+ tb.Fatalf("unknown arch %s", arch)
+ }
+ if ctxt.Arch.PtrSize != 8 {
+ tb.Fatal("dummyTypes is 64-bit only")
+ }
+ c := &Conf{
+ config: NewConfig(arch, dummyTypes, ctxt, true),
+ tb: tb,
+ }
+ return c
+}
+
+type Conf struct {
+ config *Config
+ tb testing.TB
+ fe Frontend
+}
+
+func (c *Conf) Frontend() Frontend {
+ if c.fe == nil {
+ c.fe = DummyFrontend{t: c.tb, ctxt: c.config.ctxt}
+ }
+ return c.fe
+}
+
+// DummyFrontend is a test-only frontend.
+// It assumes 64-bit integers and pointers.
+type DummyFrontend struct {
+ t testing.TB
+ ctxt *obj.Link
+}
+
+type DummyAuto struct {
+ t *types.Type
+ s string
+}
+
+func (d *DummyAuto) Typ() *types.Type {
+ return d.t
+}
+
+func (d *DummyAuto) String() string {
+ return d.s
+}
+
+func (d *DummyAuto) StorageClass() StorageClass {
+ return ClassAuto
+}
+
+func (d *DummyAuto) IsSynthetic() bool {
+ return false
+}
+
+func (d *DummyAuto) IsAutoTmp() bool {
+ return true
+}
+
+func (DummyFrontend) StringData(s string) *obj.LSym {
+ return nil
+}
+func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode {
+ return &DummyAuto{t: t, s: "aDummyAuto"}
+}
+func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}
+}
+func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8}
+}
+func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
+ return LocalSlot{N: s.N, Type: s.Type.Elem().PtrTo(), Off: s.Off},
+ LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8},
+ LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16}
+}
+func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
+ if s.Type.Size() == 16 {
+ return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8}
+ }
+ return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4}
+}
+func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
+ if s.Type.IsSigned() {
+ return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+ }
+ return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off}
+}
+func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
+ return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)}
+}
+func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
+ return LocalSlot{N: s.N, Type: s.Type.Elem(), Off: s.Off}
+}
+
+func (d DummyFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+ return LocalSlot{N: parent.N, Type: t, Off: offset}
+}
+func (DummyFrontend) Line(_ src.XPos) string {
+ return "unknown.go:0"
+}
+func (DummyFrontend) AllocFrame(f *Func) {
+}
+func (d DummyFrontend) Syslook(s string) *obj.LSym {
+ return d.ctxt.Lookup(s)
+}
+func (DummyFrontend) UseWriteBarrier() bool {
+ return true // only writebarrier_test cares
+}
+func (DummyFrontend) SetWBPos(pos src.XPos) {
+}
+
+func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d DummyFrontend) Log() bool { return true }
+
+func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d DummyFrontend) Debug_checknil() bool { return false }
+
+func (d DummyFrontend) MyImportPath() string {
+ return "my/import/path"
+}
+
+var dummyTypes Types
+
+func init() {
+ // Initialize just enough of the universe and the types package to make our tests function.
+ // TODO(josharian): move universe initialization to the types package,
+ // so this test setup can share it.
+
+ types.Tconv = func(t *types.Type, flag, mode int) string {
+ return t.Etype.String()
+ }
+ types.Sconv = func(s *types.Sym, flag, mode int) string {
+ return "sym"
+ }
+ types.FormatSym = func(sym *types.Sym, s fmt.State, verb rune, mode int) {
+ fmt.Fprintf(s, "sym")
+ }
+ types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
+ fmt.Fprintf(s, "%v", t.Etype)
+ }
+ types.Dowidth = func(t *types.Type) {}
+
+ for _, typ := range [...]struct {
+ width int64
+ et types.EType
+ }{
+ {1, types.TINT8},
+ {1, types.TUINT8},
+ {1, types.TBOOL},
+ {2, types.TINT16},
+ {2, types.TUINT16},
+ {4, types.TINT32},
+ {4, types.TUINT32},
+ {4, types.TFLOAT32},
+ {8, types.TFLOAT64},
+ {8, types.TUINT64},
+ {8, types.TINT64},
+ {8, types.TINT},
+ {8, types.TUINTPTR},
+ } {
+ t := types.New(typ.et)
+ t.Width = typ.width
+ t.Align = uint8(typ.width)
+ types.Types[typ.et] = t
+ }
+ dummyTypes.SetTypPtrs()
+}
+
+func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
+
+func (d DummyFrontend) CanSSA(t *types.Type) bool {
+ // There are no un-SSAable types in dummy land.
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
new file mode 100644
index 0000000..61c45a6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -0,0 +1,269 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// flagalloc allocates the flag register among all the flag-generating
+// instructions. Flag values are recomputed if they need to be
+// spilled/restored.
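+//
+// For example (schematically), if a comparison result is needed both by a later branch and
+// after an intervening flag-clobbering instruction, the comparison is simply re-issued
+// (see copyFlags) rather than saved to and restored from memory.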
+func flagalloc(f *Func) {
+ // Compute the in-register flag value we want at the end of
+ // each block. This is basically a best-effort live variable
+ // analysis, so it can be much simpler than a full analysis.
+ end := make([]*Value, f.NumBlocks())
+ po := f.postorder()
+ for n := 0; n < 2; n++ {
+ for _, b := range po {
+ // Walk values backwards to figure out what flag
+ // value we want in the flag register at the start
+ // of the block.
+ var flag *Value
+ for _, c := range b.ControlValues() {
+ if c.Type.IsFlags() {
+ if flag != nil {
+ panic("cannot have multiple controls using flags")
+ }
+ flag = c
+ }
+ }
+ if flag == nil {
+ flag = end[b.ID]
+ }
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ v := b.Values[j]
+ if v == flag {
+ flag = nil
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flag = a
+ }
+ }
+ }
+ if flag != nil {
+ for _, e := range b.Preds {
+ p := e.b
+ end[p.ID] = flag
+ }
+ }
+ }
+ }
+
+ // For blocks which have a flags control value, that's the only value
+ // we can leave in the flags register at the end of the block. (There
+ // is no place to put a flag regeneration instruction.)
+ for _, b := range f.Blocks {
+ if b.Kind == BlockDefer {
+ // Defer blocks internally use/clobber the flags value.
+ end[b.ID] = nil
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if v.Type.IsFlags() && end[b.ID] != v {
+ end[b.ID] = nil
+ }
+ }
+ }
+
+ // Compute which flags values will need to be spilled.
+ spill := map[ID]bool{}
+ for _, b := range f.Blocks {
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ }
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // a will need to be restored here.
+ spill[a.ID] = true
+ flag = a
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for _, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ spill[v.ID] = true
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ spill[v.ID] = true
+ }
+ }
+
+ // Add flag spill and recomputation where they are needed.
+ var remove []*Value // values that should be checked for possible removal
+ var oldSched []*Value
+ for _, b := range f.Blocks {
+ oldSched = append(oldSched[:0], b.Values...)
+ b.Values = b.Values[:0]
+ // The current live flag value (the pre-flagalloc copy).
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ // Note: the following condition depends on the lack of critical edges.
+ for _, e := range b.Preds[1:] {
+ p := e.b
+ if end[p.ID] != flag {
+ f.Fatalf("live flag in %s's predecessors not consistent", b)
+ }
+ }
+ }
+ for _, v := range oldSched {
+ if v.Op == OpPhi && v.Type.IsFlags() {
+ f.Fatalf("phi of flags not supported: %s", v.LongString())
+ }
+
+ // If v will be spilled, and v uses memory, then we must split it
+ // into a load + a flag generator.
+ if spill[v.ID] && v.MemoryArg() != nil {
+ remove = append(remove, v)
+ if !f.Config.splitLoad(v) {
+ f.Fatalf("can't split flag generator: %s", v.LongString())
+ }
+ }
+
+ // Make sure any flag arg of v is in the flags register.
+ // If not, recompute it.
+ for i, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // Recalculate a
+ c := copyFlags(a, b)
+ // Update v.
+ v.SetArg(i, c)
+ // Remember the most-recently computed flag value.
+ flag = a
+ }
+ // Issue v.
+ b.Values = append(b.Values, v)
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ // Recalculate control value.
+ remove = append(remove, v)
+ c := copyFlags(v, b)
+ b.ReplaceControl(i, c)
+ flag = v
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ // Need to reissue flag generator for use by
+ // subsequent blocks.
+ remove = append(remove, v)
+ copyFlags(v, b)
+ // Note: this flag generator is not properly linked up
+ // with the flag users. This breaks the SSA representation.
+ // We could fix up the users with another pass, but for now
+ // we'll just leave it. (Regalloc has the same issue for
+ // standard regs, and it runs next.)
+ // For this reason, take care not to add this flag
+ // generator to the remove list.
+ }
+ }
+
+ // Save live flag state for later.
+ for _, b := range f.Blocks {
+ b.FlagsLiveAtEnd = end[b.ID] != nil
+ }
+
+ // Remove any now-dead values.
+ // The number of values to remove is likely small,
+ // and removing them requires processing all values in a block,
+ // so minimize the number of blocks that we touch.
+
+ // Shrink remove to contain only dead values, and clobber those dead values.
+ for i := 0; i < len(remove); i++ {
+ v := remove[i]
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ continue
+ }
+ // v is still used; remove it from the remove slice.
+ last := len(remove) - 1
+ remove[i] = remove[last]
+ remove[last] = nil
+ remove = remove[:last]
+ i-- // reprocess value at i
+ }
+
+ if len(remove) == 0 {
+ return
+ }
+
+ removeBlocks := f.newSparseSet(f.NumBlocks())
+ defer f.retSparseSet(removeBlocks)
+ for _, v := range remove {
+ removeBlocks.add(v.Block.ID)
+ }
+
+ // Process affected blocks, preserving value order.
+ for _, b := range f.Blocks {
+ if !removeBlocks.contains(b.ID) {
+ continue
+ }
+ i := 0
+ for j := 0; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.truncateValues(i)
+ }
+}
+
+func (v *Value) clobbersFlags() bool {
+ if opcodeTable[v.Op].clobberFlags {
+ return true
+ }
+ if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) {
+ // This case handles the possibility where a flag value is generated but never used.
+ // In that case, there's no corresponding Select to overwrite the flags value,
+ // so we must consider flags clobbered by the tuple-generating instruction.
+ return true
+ }
+ return false
+}
+
+// copyFlags copies v (flag generator) into b, returns the copy.
+// If v's arg is also flags, copy recursively.
+func copyFlags(v *Value, b *Block) *Value {
+ flagsArgs := make(map[int]*Value)
+ for i, a := range v.Args {
+ if a.Type.IsFlags() || a.Type.IsTuple() {
+ flagsArgs[i] = copyFlags(a, b)
+ }
+ }
+ c := v.copyInto(b)
+ for i, a := range flagsArgs {
+ c.SetArg(i, a)
+ }
+ return c
+}
diff --git a/src/cmd/compile/internal/ssa/flags_amd64_test.s b/src/cmd/compile/internal/ssa/flags_amd64_test.s
new file mode 100644
index 0000000..8bd8701
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_amd64_test.s
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ADDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ SUBQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ANDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_arm64_test.s b/src/cmd/compile/internal/ssa/flags_arm64_test.s
new file mode 100644
index 0000000..f201bcc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_arm64_test.s
@@ -0,0 +1,32 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMN R0, R1
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMP R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ TST R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ BIC $0x30000000, R0 // clear C, V bits, as TST does not change those flags
+ MOVD R0, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_test.go b/src/cmd/compile/internal/ssa/flags_test.go
new file mode 100644
index 0000000..d64abf6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_test.go
@@ -0,0 +1,108 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 arm64
+
+package ssa
+
+// This file tests the functions addFlags64, subFlags64, and logicFlags64 by comparing
+// their results to what the chip calculates.
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestAddFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := addFlags64(x, y)
+ b := flagRegister2flagConstant(asmAddFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAdd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 9 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 9", len(coverage))
+ }
+}
+
+func TestSubFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := subFlags64(x, y)
+ b := flagRegister2flagConstant(asmSubFlags(x, y), true)
+ if a != b {
+ t.Errorf("asmSub diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 7 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 7", len(coverage))
+ }
+}
+
+func TestAndFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := logicFlags64(x & y)
+ b := flagRegister2flagConstant(asmAndFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAnd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 3 {
+ t.Errorf("coverage too small, got %d want 3", len(coverage))
+ }
+}
+
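+// These functions are implemented in flags_amd64_test.s and flags_arm64_test.s; each performs
+// the named operation and returns the raw flags register (EFLAGS on amd64, NZCV on arm64).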
+func asmAddFlags(x, y int64) int
+func asmSubFlags(x, y int64) int
+func asmAndFlags(x, y int64) int
+
+func flagRegister2flagConstant(x int, sub bool) flagConstant {
+ var fcb flagConstantBuilder
+ switch runtime.GOARCH {
+ case "amd64":
+ fcb.Z = x>>6&1 != 0
+ fcb.N = x>>7&1 != 0
+ fcb.C = x>>0&1 != 0
+ if sub {
+ // Convert from amd64-sense to arm-sense
+ fcb.C = !fcb.C
+ }
+ fcb.V = x>>11&1 != 0
+ case "arm64":
+ fcb.Z = x>>30&1 != 0
+ fcb.N = x>>31&1 != 0
+ fcb.C = x>>29&1 != 0
+ fcb.V = x>>28&1 != 0
+ default:
+ panic("unsupported architecture: " + runtime.GOARCH)
+ }
+ return fcb.encode()
+}
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
new file mode 100644
index 0000000..e6f899a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -0,0 +1,799 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "crypto/sha1"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+)
+
+type writeSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// A Func represents a Go func declaration (or function literal) and its body.
+// This package compiles each Func independently.
+// Funcs are single-use; a new Func must be created for every compiled function.
+type Func struct {
+ Config *Config // architecture information
+ Cache *Cache // re-usable cache
+ fe Frontend // frontend state associated with this Func, callbacks into compiler frontend
+ pass *pass // current pass information (name, options, etc.)
+ Name string // e.g. NewFunc or (*Func).NumBlocks (no package prefix)
+ Type *types.Type // type signature of the function.
+ Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
+ Entry *Block // the entry basic block
+
+ bid idAlloc // block ID allocator
+ vid idAlloc // value ID allocator
+
+ // Given an environment variable used for debug hash match,
+ // what file (if any) receives the yes/no logging?
+ logfiles map[string]writeSyncer
+ HTMLWriter *HTMLWriter // html writer, for debugging
+ DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
+ PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false.
+ ruleMatches map[string]int // number of times countRule was called during compilation for any given string
+
+ scheduled bool // Values in Blocks are in final order
+ laidout bool // Blocks are ordered
+ NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
+ dumpFileSeq uint8 // the sequence number of the dump file ("%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+
+ // when register allocation is done, maps value ids to locations
+ RegAlloc []Location
+
+ // map from LocalSlot to set of Values that we want to store in that slot.
+ NamedValues map[LocalSlot][]*Value
+ // Names is a copy of NamedValues.Keys. We keep a separate list
+ // of keys to make iteration order deterministic.
+ Names []LocalSlot
+
+ // WBLoads is a list of Blocks that branch on the write
+ // barrier flag. Safe-points are disabled from the OpLoad that
+ // reads the write-barrier flag until the control flow rejoins
+ // below the two successors of this block.
+ WBLoads []*Block
+
+ freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil.
+ freeBlocks *Block // free Blocks linked by succstorage[0].b. All other fields except ID are 0/nil.
+
+ cachedPostorder []*Block // cached postorder traversal
+ cachedIdom []*Block // cached immediate dominators
+ cachedSdom SparseTree // cached dominator tree
+ cachedLoopnest *loopnest // cached loop nest information
+ cachedLineStarts *xposmap // cached map/set of xpos to integers
+
+ auxmap auxmap // map from aux values to opaque ids used by CSE
+ constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
+}
+
+// NewFunc returns a new, empty function object.
+// Caller must set f.Config and f.Cache before using f.
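+//
+// A minimal usage sketch (myFrontend and myConfig are assumed to exist elsewhere):
+//    f := NewFunc(myFrontend)
+//    f.Config = myConfig
+//    f.Cache = new(Cache)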
+func NewFunc(fe Frontend) *Func {
+ return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value)}
+}
+
+// NumBlocks returns an integer larger than the id of any Block in the Func.
+func (f *Func) NumBlocks() int {
+ return f.bid.num()
+}
+
+// NumValues returns an integer larger than the id of any Value in the Func.
+func (f *Func) NumValues() int {
+ return f.vid.num()
+}
+
+// newSparseSet returns a sparse set that can store integers in the range [0, n).
+func (f *Func) newSparseSet(n int) *sparseSet {
+ for i, scr := range f.Cache.scrSparseSet {
+ if scr != nil && scr.cap() >= n {
+ f.Cache.scrSparseSet[i] = nil
+ scr.clear()
+ return scr
+ }
+ }
+ return newSparseSet(n)
+}
+
+// retSparseSet returns a sparse set to f's cache of sparse
+// sets so it can be reused by a later call to f.newSparseSet.
+func (f *Func) retSparseSet(ss *sparseSet) {
+ for i, scr := range f.Cache.scrSparseSet {
+ if scr == nil {
+ f.Cache.scrSparseSet[i] = ss
+ return
+ }
+ }
+ f.Cache.scrSparseSet = append(f.Cache.scrSparseSet, ss)
+}
+
+// newSparseMap returns a sparse map that can store integer keys in the range [0, n).
+func (f *Func) newSparseMap(n int) *sparseMap {
+ for i, scr := range f.Cache.scrSparseMap {
+ if scr != nil && scr.cap() >= n {
+ f.Cache.scrSparseMap[i] = nil
+ scr.clear()
+ return scr
+ }
+ }
+ return newSparseMap(n)
+}
+
+// retSparseMap returns a sparse map to f's cache of sparse
+// maps so it can be reused by a later call to f.newSparseMap.
+func (f *Func) retSparseMap(ss *sparseMap) {
+ for i, scr := range f.Cache.scrSparseMap {
+ if scr == nil {
+ f.Cache.scrSparseMap[i] = ss
+ return
+ }
+ }
+ f.Cache.scrSparseMap = append(f.Cache.scrSparseMap, ss)
+}
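+
+// A minimal usage sketch for the scratch helpers above (illustrative only;
+// the sparseSet method names add/contains are assumed from its implementation):
+// a pass borrows a set sized by NumValues and returns it when done.
+//
+//	s := f.newSparseSet(f.NumValues())
+//	defer f.retSparseSet(s)
+//	// ... s.add(v.ID), s.contains(v.ID) while walking values ...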
+
+// newPoset returns a new poset from the internal cache
+func (f *Func) newPoset() *poset {
+ if len(f.Cache.scrPoset) > 0 {
+ po := f.Cache.scrPoset[len(f.Cache.scrPoset)-1]
+ f.Cache.scrPoset = f.Cache.scrPoset[:len(f.Cache.scrPoset)-1]
+ return po
+ }
+ return newPoset()
+}
+
+// retPoset returns a poset to the internal cache
+func (f *Func) retPoset(po *poset) {
+ f.Cache.scrPoset = append(f.Cache.scrPoset, po)
+}
+
+// newDeadcodeLive returns a slice for the
+// deadcode pass to use to indicate which values are live.
+func (f *Func) newDeadcodeLive() []bool {
+ r := f.Cache.deadcode.live
+ f.Cache.deadcode.live = nil
+ return r
+}
+
+// retDeadcodeLive returns a deadcode live value slice for re-use.
+func (f *Func) retDeadcodeLive(live []bool) {
+ f.Cache.deadcode.live = live
+}
+
+// newDeadcodeLiveOrderStmts returns a slice for the
+// deadcode pass to use to indicate which values
+// need special treatment for statement boundaries.
+func (f *Func) newDeadcodeLiveOrderStmts() []*Value {
+ r := f.Cache.deadcode.liveOrderStmts
+ f.Cache.deadcode.liveOrderStmts = nil
+ return r
+}
+
+// retDeadcodeLiveOrderStmts returns a deadcode liveOrderStmts slice for re-use.
+func (f *Func) retDeadcodeLiveOrderStmts(liveOrderStmts []*Value) {
+ f.Cache.deadcode.liveOrderStmts = liveOrderStmts
+}
+
+// newValue allocates a new Value with the given fields and places it at the end of b.Values.
+func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = b
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ b.Values = append(b.Values, v)
+ return v
+}
+
+// newValueNoBlock allocates a new Value with the given fields.
+// The returned value is not placed in any block. Once the caller
+// decides on a block b, it must set v.Block and append
+// the returned value to b.Values.
+func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = nil // caller must fix this.
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ return v
+}
+
+// LogStat writes a string key and int value as a warning in a
+// tab-separated format easily handled by spreadsheets or awk.
+// File names, lines, and function names are included to provide enough (?)
+// context to allow item-by-item comparisons across runs.
+// For example:
+// awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log
+func (f *Func) LogStat(key string, args ...interface{}) {
+ value := ""
+ for _, a := range args {
+ value += fmt.Sprintf("\t%v", a)
+ }
+ n := "missing_pass"
+ if f.pass != nil {
+ n = strings.Replace(f.pass.name, " ", "_", -1)
+ }
+ f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name)
+}
+
+// unCacheLine removes v from f's constant cache "line" for aux,
+// resets v.InCache when it is found (and removed),
+// and returns whether v was found in that line.
+func (f *Func) unCacheLine(v *Value, aux int64) bool {
+ vv := f.constants[aux]
+ for i, cv := range vv {
+ if v == cv {
+ vv[i] = vv[len(vv)-1]
+ vv[len(vv)-1] = nil
+ f.constants[aux] = vv[0 : len(vv)-1]
+ v.InCache = false
+ return true
+ }
+ }
+ return false
+}
+
+// unCache removes v from f's constant cache.
+func (f *Func) unCache(v *Value) {
+ if v.InCache {
+ aux := v.AuxInt
+ if f.unCacheLine(v, aux) {
+ return
+ }
+ if aux == 0 {
+ switch v.Op {
+ case OpConstNil:
+ aux = constNilMagic
+ case OpConstSlice:
+ aux = constSliceMagic
+ case OpConstString:
+ aux = constEmptyStringMagic
+ case OpConstInterface:
+ aux = constInterfaceMagic
+ }
+ if aux != 0 && f.unCacheLine(v, aux) {
+ return
+ }
+ }
+ f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux)
+ }
+}
+
+// freeValue frees a value. It must no longer be referenced or have any args.
+func (f *Func) freeValue(v *Value) {
+ if v.Block == nil {
+ f.Fatalf("trying to free an already freed value")
+ }
+ if v.Uses != 0 {
+ f.Fatalf("value %s still has %d uses", v, v.Uses)
+ }
+ if len(v.Args) != 0 {
+ f.Fatalf("value %s still has %d args", v, len(v.Args))
+ }
+ // Clear everything but ID (which we reuse).
+ id := v.ID
+ if v.InCache {
+ f.unCache(v)
+ }
+ *v = Value{}
+ v.ID = id
+ v.argstorage[0] = f.freeValues
+ f.freeValues = v
+}
+
+// NewBlock allocates a new Block of the given kind and places it at the end of f.Blocks.
+func (f *Func) NewBlock(kind BlockKind) *Block {
+ var b *Block
+ if f.freeBlocks != nil {
+ b = f.freeBlocks
+ f.freeBlocks = b.succstorage[0].b
+ b.succstorage[0].b = nil
+ } else {
+ ID := f.bid.get()
+ if int(ID) < len(f.Cache.blocks) {
+ b = &f.Cache.blocks[ID]
+ b.ID = ID
+ } else {
+ b = &Block{ID: ID}
+ }
+ }
+ b.Kind = kind
+ b.Func = f
+ b.Preds = b.predstorage[:0]
+ b.Succs = b.succstorage[:0]
+ b.Values = b.valstorage[:0]
+ f.Blocks = append(f.Blocks, b)
+ f.invalidateCFG()
+ return b
+}
+
+func (f *Func) freeBlock(b *Block) {
+ if b.Func == nil {
+ f.Fatalf("trying to free an already freed block")
+ }
+ // Clear everything but ID (which we reuse).
+ id := b.ID
+ *b = Block{}
+ b.ID = id
+ b.succstorage[0].b = f.freeBlocks
+ f.freeBlocks = b
+}
+
+// NewValue0 returns a new value in the block with no arguments and zero aux values.
+func (b *Block) NewValue0(pos src.XPos, op Op, t *types.Type) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0I returns a new value in the block with no arguments and an auxint value.
+func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0A returns a new value in the block with no arguments and an aux value.
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux interface{}) *Value {
+ if _, ok := aux.(int64); ok {
+ // Disallow int64 aux values. They should be in the auxint field instead.
+ // Maybe we want to allow this at some point, but for now we disallow it
+ // to prevent errors like using NewValue1A instead of NewValue1I.
+ b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
+ }
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0IA returns a new value in the block with no arguments and both an auxint and an aux value.
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue1 returns a new value in the block with one argument and zero aux values.
+func (b *Block) NewValue1(pos src.XPos, op Op, t *types.Type, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1I returns a new value in the block with one argument and an auxint value.
+func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1A returns a new value in the block with one argument and an aux value.
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue2 returns a new value in the block with two arguments and zero aux values.
+func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2A returns a new value in the block with two arguments and one aux value.
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2I returns a new value in the block with two arguments and an auxint value.
+func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values.
+func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux interface{}, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue3 returns a new value in the block with three arguments and zero aux values.
+func (b *Block) NewValue3(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3I returns a new value in the block with three arguments and an auxint value.
+func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3A returns a new value in the block with three arguments and an aux value.
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue4 returns a new value in the block with four arguments and zero aux values.
+func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// NewValue4I returns a new value in the block with four arguments and an auxint value.
+func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// constVal returns a constant value for c.
+func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value {
+ if f.constants == nil {
+ f.constants = make(map[int64][]*Value)
+ }
+ vv := f.constants[c]
+ for _, v := range vv {
+ if v.Op == op && v.Type.Compare(t) == types.CMPeq {
+ if setAuxInt && v.AuxInt != c {
+ panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c))
+ }
+ return v
+ }
+ }
+ var v *Value
+ if setAuxInt {
+ v = f.Entry.NewValue0I(src.NoXPos, op, t, c)
+ } else {
+ v = f.Entry.NewValue0(src.NoXPos, op, t)
+ }
+ f.constants[c] = append(vv, v)
+ v.InCache = true
+ return v
+}
+
+// These magic auxint values let us easily cache non-numeric constants
+// using the same constants map while making collisions unlikely.
+// These values are unlikely to occur in regular code and
+// are easy to grep for in case of bugs.
+const (
+ constSliceMagic = 1122334455
+ constInterfaceMagic = 2233445566
+ constNilMagic = 3344556677
+ constEmptyStringMagic = 4455667788
+)
+
+// ConstBool returns a bool constant representing its argument.
+func (f *Func) ConstBool(t *types.Type, c bool) *Value {
+ i := int64(0)
+ if c {
+ i = 1
+ }
+ return f.constVal(OpConstBool, t, i, true)
+}
+func (f *Func) ConstInt8(t *types.Type, c int8) *Value {
+ return f.constVal(OpConst8, t, int64(c), true)
+}
+func (f *Func) ConstInt16(t *types.Type, c int16) *Value {
+ return f.constVal(OpConst16, t, int64(c), true)
+}
+func (f *Func) ConstInt32(t *types.Type, c int32) *Value {
+ return f.constVal(OpConst32, t, int64(c), true)
+}
+func (f *Func) ConstInt64(t *types.Type, c int64) *Value {
+ return f.constVal(OpConst64, t, c, true)
+}
+func (f *Func) ConstFloat32(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true)
+}
+func (f *Func) ConstFloat64(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst64F, t, int64(math.Float64bits(c)), true)
+}
+
+func (f *Func) ConstSlice(t *types.Type) *Value {
+ return f.constVal(OpConstSlice, t, constSliceMagic, false)
+}
+func (f *Func) ConstInterface(t *types.Type) *Value {
+ return f.constVal(OpConstInterface, t, constInterfaceMagic, false)
+}
+func (f *Func) ConstNil(t *types.Type) *Value {
+ return f.constVal(OpConstNil, t, constNilMagic, false)
+}
+func (f *Func) ConstEmptyString(t *types.Type) *Value {
+ v := f.constVal(OpConstString, t, constEmptyStringMagic, false)
+ v.Aux = ""
+ return v
+}
+func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
+ v := f.constVal(OpOffPtr, t, c, true)
+ if len(v.Args) == 0 {
+ v.AddArg(sp)
+ }
+ return v
+
+}
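+
+// A small usage sketch of the constant cache (illustrative; it assumes f
+// already has an entry block): repeated requests for the same constant are
+// served from f.constants instead of allocating a new Value.
+//
+//	one := f.ConstInt64(f.Config.Types.Int64, 1)
+//	same := f.ConstInt64(f.Config.Types.Int64, 1) // cache hit: same == one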
+
+func (f *Func) Frontend() Frontend { return f.fe }
+func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) }
+func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) }
+func (f *Func) Log() bool { return f.fe.Log() }
+func (f *Func) Fatalf(msg string, args ...interface{}) { f.fe.Fatalf(f.Entry.Pos, msg, args...) }
+
+// postorder returns the reachable blocks in f in a postorder traversal.
+func (f *Func) postorder() []*Block {
+ if f.cachedPostorder == nil {
+ f.cachedPostorder = postorder(f)
+ }
+ return f.cachedPostorder
+}
+
+func (f *Func) Postorder() []*Block {
+ return f.postorder()
+}
+
+// Idom returns a map from block ID to the immediate dominator of that block.
+// f.Entry.ID maps to nil. Unreachable blocks map to nil as well.
+func (f *Func) Idom() []*Block {
+ if f.cachedIdom == nil {
+ f.cachedIdom = dominators(f)
+ }
+ return f.cachedIdom
+}
+
+// Sdom returns a sparse tree representing the dominator relationships
+// among the blocks of f.
+func (f *Func) Sdom() SparseTree {
+ if f.cachedSdom == nil {
+ f.cachedSdom = newSparseTree(f, f.Idom())
+ }
+ return f.cachedSdom
+}
+
+// loopnest returns the loop nest information for f.
+func (f *Func) loopnest() *loopnest {
+ if f.cachedLoopnest == nil {
+ f.cachedLoopnest = loopnestfor(f)
+ }
+ return f.cachedLoopnest
+}
+
+// invalidateCFG tells f that its CFG has changed.
+func (f *Func) invalidateCFG() {
+ f.cachedPostorder = nil
+ f.cachedIdom = nil
+ f.cachedSdom = nil
+ f.cachedLoopnest = nil
+}
+
+// DebugHashMatch reports whether environment variable evname
+// 1) is empty (this is a special more-quickly implemented case of 3)
+// 2) is "y" or "Y"
+// 3) is a suffix of the sha1 hash of name
+// 4) is a suffix of the value of the environment variable
+// fmt.Sprintf("%s%d", evname, i) for some i >= 0,
+// provided that all such variables are nonempty for 0 <= j <= i
+// Otherwise it returns false.
+// When true is returned the message
+// "%s triggered %s\n", evname, name
+// is printed on the file named in environment variable
+// GSHS_LOGFILE
+// or standard out if that is empty or there is an error
+// opening the file.
+func (f *Func) DebugHashMatch(evname string) bool {
+ name := f.fe.MyImportPath() + "." + f.Name
+ evhash := os.Getenv(evname)
+ switch evhash {
+ case "":
+ return true // default behavior with no EV is "on"
+ case "y", "Y":
+ f.logDebugHashMatch(evname, name)
+ return true
+ case "n", "N":
+ return false
+ }
+ // Check the hash of the name against a partial input hash.
+ // We use this feature to do a binary search to
+ // find a function that is incorrectly compiled.
+ hstr := ""
+ for _, b := range sha1.Sum([]byte(name)) {
+ hstr += fmt.Sprintf("%08b", b)
+ }
+
+ if strings.HasSuffix(hstr, evhash) {
+ f.logDebugHashMatch(evname, name)
+ return true
+ }
+
+ // Iteratively try additional hashes to allow tests for multi-point
+ // failure.
+ for i := 0; true; i++ {
+ ev := fmt.Sprintf("%s%d", evname, i)
+ evv := os.Getenv(ev)
+ if evv == "" {
+ break
+ }
+ if strings.HasSuffix(hstr, evv) {
+ f.logDebugHashMatch(ev, name)
+ return true
+ }
+ }
+ return false
+}
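+
+// For example (an illustrative invocation, not part of this change), building
+// with
+//
+//	GOSSAHASH=0110 GSHS_LOGFILE=/tmp/gossahash.log
+//
+// in the environment restricts DebugTest-guarded code to functions whose name
+// hash ends in the bits 0110 and logs each triggered function to that file;
+// growing or shrinking the suffix drives the binary search described above.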
+
+func (f *Func) logDebugHashMatch(evname, name string) {
+ if f.logfiles == nil {
+ f.logfiles = make(map[string]writeSyncer)
+ }
+ file := f.logfiles[evname]
+ if file == nil {
+ file = os.Stdout
+ if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" {
+ var err error
+ file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ f.Fatalf("could not open hash-testing logfile %s", tmpfile)
+ }
+ }
+ f.logfiles[evname] = file
+ }
+ fmt.Fprintf(file, "%s triggered %s\n", evname, name)
+ file.Sync()
+}
+
+func DebugNameMatch(evname, name string) bool {
+ return os.Getenv(evname) == name
+}
+
+func (f *Func) spSb() (sp, sb *Value) {
+ initpos := f.Entry.Pos
+ for _, v := range f.Entry.Values {
+ if v.Op == OpSB {
+ sb = v
+ }
+ if v.Op == OpSP {
+ sp = v
+ }
+ if sb != nil && sp != nil {
+ break
+ }
+ }
+ if sb == nil {
+ sb = f.Entry.NewValue0(initpos.WithNotStmt(), OpSB, f.Config.Types.Uintptr)
+ }
+ if sp == nil {
+ sp = f.Entry.NewValue0(initpos.WithNotStmt(), OpSP, f.Config.Types.Uintptr)
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
new file mode 100644
index 0000000..568c643
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -0,0 +1,484 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains some utility functions to help define Funcs for testing.
+// As an example, the following func
+//
+// b1:
+// v1 = InitMem <mem>
+// Plain -> b2
+// b2:
+// Exit v1
+// b3:
+// v2 = Const <bool> [true]
+// If v2 -> b3 b2
+//
+// can be defined as
+//
+// fun := Fun("entry",
+// Bloc("entry",
+// Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+// Goto("exit")),
+// Bloc("exit",
+// Exit("mem")),
+// Bloc("deadblock",
+// Valu("deadval", OpConstBool, c.config.Types.Bool, 0, true),
+// If("deadval", "deadblock", "exit")))
+//
+// and the Blocks or Values used in the Func can be accessed
+// like this:
+// fun.blocks["entry"] or fun.values["deadval"]
+
+package ssa
+
+// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc.
+// TODO(matloob): Write a parser for the Func disassembly. Maybe
+// the parser can be used instead of Fun.
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+// Compare two Funcs for equivalence. Their CFGs must be isomorphic,
+// and their values must correspond.
+// Requires that values and predecessors are in the same order, even
+// though Funcs could be equivalent when they are not.
+// TODO(matloob): Allow values and predecessors to be in different
+// orders if the CFGs are otherwise equivalent.
+func Equiv(f, g *Func) bool {
+ valcor := make(map[*Value]*Value)
+ var checkVal func(fv, gv *Value) bool
+ checkVal = func(fv, gv *Value) bool {
+ if fv == nil && gv == nil {
+ return true
+ }
+ if valcor[fv] == nil && valcor[gv] == nil {
+ valcor[fv] = gv
+ valcor[gv] = fv
+ // Ignore ids. Ops and Types are compared for equality.
+ // TODO(matloob): Make sure types are canonical and can
+ // be compared for equality.
+ if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt {
+ return false
+ }
+ if !reflect.DeepEqual(fv.Aux, gv.Aux) {
+ // This makes the assumption that aux values can be compared
+ // using DeepEqual.
+ // TODO(matloob): Aux values may be *gc.Sym pointers in the near
+ // future. Make sure they are canonical.
+ return false
+ }
+ if len(fv.Args) != len(gv.Args) {
+ return false
+ }
+ for i := range fv.Args {
+ if !checkVal(fv.Args[i], gv.Args[i]) {
+ return false
+ }
+ }
+ }
+ return valcor[fv] == gv && valcor[gv] == fv
+ }
+ blkcor := make(map[*Block]*Block)
+ var checkBlk func(fb, gb *Block) bool
+ checkBlk = func(fb, gb *Block) bool {
+ if blkcor[fb] == nil && blkcor[gb] == nil {
+ blkcor[fb] = gb
+ blkcor[gb] = fb
+ // ignore ids
+ if fb.Kind != gb.Kind {
+ return false
+ }
+ if len(fb.Values) != len(gb.Values) {
+ return false
+ }
+ for i := range fb.Values {
+ if !checkVal(fb.Values[i], gb.Values[i]) {
+ return false
+ }
+ }
+ if len(fb.Succs) != len(gb.Succs) {
+ return false
+ }
+ for i := range fb.Succs {
+ if !checkBlk(fb.Succs[i].b, gb.Succs[i].b) {
+ return false
+ }
+ }
+ if len(fb.Preds) != len(gb.Preds) {
+ return false
+ }
+ for i := range fb.Preds {
+ if !checkBlk(fb.Preds[i].b, gb.Preds[i].b) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return blkcor[fb] == gb && blkcor[gb] == fb
+ }
+
+ return checkBlk(f.Entry, g.Entry)
+}
+
+// fun is the return type of Fun. It contains the created func
+// itself as well as indexes from block and value names into the
+// corresponding Blocks and Values.
+type fun struct {
+ f *Func
+ blocks map[string]*Block
+ values map[string]*Value
+}
+
+var emptyPass pass = pass{
+ name: "empty pass",
+}
+
+// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check"
+// as the Aux of a static call.
+func AuxCallLSym(name string) *AuxCall {
+ return &AuxCall{Fn: &obj.LSym{}}
+}
+
+// Fun takes the name of an entry bloc and a series of Bloc calls, and
+// returns a fun containing the composed Func. entry must be a name
+// supplied to one of the Bloc functions. Each of the bloc names and
+// valu names should be unique across the Fun.
+func (c *Conf) Fun(entry string, blocs ...bloc) fun {
+ f := NewFunc(c.Frontend())
+ f.Config = c.config
+ // TODO: Either mark some SSA tests as t.Parallel,
+ // or set up a shared Cache and Reset it between tests.
+ // But not both.
+ f.Cache = new(Cache)
+ f.pass = &emptyPass
+ f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}})
+
+ blocks := make(map[string]*Block)
+ values := make(map[string]*Value)
+ // Create all the blocks and values.
+ for _, bloc := range blocs {
+ b := f.NewBlock(bloc.control.kind)
+ blocks[bloc.name] = b
+ for _, valu := range bloc.valus {
+ // args are filled in the second pass.
+ values[valu.name] = b.NewValue0IA(src.NoXPos, valu.op, valu.t, valu.auxint, valu.aux)
+ }
+ }
+ // Connect the blocks together and specify control values.
+ f.Entry = blocks[entry]
+ for _, bloc := range blocs {
+ b := blocks[bloc.name]
+ c := bloc.control
+ // Specify control values.
+ if c.control != "" {
+ cval, ok := values[c.control]
+ if !ok {
+ f.Fatalf("control value for block %s missing", bloc.name)
+ }
+ b.SetControl(cval)
+ }
+ // Fill in args.
+ for _, valu := range bloc.valus {
+ v := values[valu.name]
+ for _, arg := range valu.args {
+ a, ok := values[arg]
+ if !ok {
+ b.Fatalf("arg %s missing for value %s in block %s",
+ arg, valu.name, bloc.name)
+ }
+ v.AddArg(a)
+ }
+ }
+ // Connect to successors.
+ for _, succ := range c.succs {
+ b.AddEdgeTo(blocks[succ])
+ }
+ }
+ return fun{f, blocks, values}
+}
+
+// Bloc defines a block for Fun. The bloc name should be unique
+// across the containing Fun. entries should consist of calls to Valu,
+// as well as one call to Goto, If, or Exit to specify the block kind.
+func Bloc(name string, entries ...interface{}) bloc {
+ b := bloc{}
+ b.name = name
+ seenCtrl := false
+ for _, e := range entries {
+ switch v := e.(type) {
+ case ctrl:
+ // there should be exactly one Ctrl entry.
+ if seenCtrl {
+ panic(fmt.Sprintf("already seen control for block %s", name))
+ }
+ b.control = v
+ seenCtrl = true
+ case valu:
+ b.valus = append(b.valus, v)
+ }
+ }
+ if !seenCtrl {
+ panic(fmt.Sprintf("block %s doesn't have control", b.name))
+ }
+ return b
+}
+
+// Valu defines a value in a block.
+func Valu(name string, op Op, t *types.Type, auxint int64, aux interface{}, args ...string) valu {
+ return valu{name, op, t, auxint, aux, args}
+}
+
+// Goto specifies that this is a BlockPlain and names the single successor.
+// TODO(matloob): choose a better name.
+func Goto(succ string) ctrl {
+ return ctrl{BlockPlain, "", []string{succ}}
+}
+
+// If specifies a BlockIf.
+func If(cond, sub, alt string) ctrl {
+ return ctrl{BlockIf, cond, []string{sub, alt}}
+}
+
+// Exit specifies a BlockExit.
+func Exit(arg string) ctrl {
+ return ctrl{BlockExit, arg, []string{}}
+}
+
+// Eq specifies a BlockAMD64EQ.
+func Eq(cond, sub, alt string) ctrl {
+ return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
+}
+
+// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto,
+// If, and Exit to help define blocks.
+
+type bloc struct {
+ name string
+ control ctrl
+ valus []valu
+}
+
+type ctrl struct {
+ kind BlockKind
+ control string
+ succs []string
+}
+
+type valu struct {
+ name string
+ op Op
+ t *types.Type
+ auxint int64
+ aux interface{}
+ args []string
+}
+
+func TestArgs(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, c.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, c.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+ sum := fun.values["sum"]
+ for i, name := range []string{"a", "b"} {
+ if sum.Args[i] != fun.values[name] {
+ t.Errorf("arg %d for sum is incorrect: want %s, got %s",
+ i, sum.Args[i], fun.values[name])
+ }
+ }
+}
+
+func TestEquiv(t *testing.T) {
+ cfg := testConfig(t)
+ equivalentCases := []struct{ f, g fun }{
+ // simple case
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ },
+ // block order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit"))),
+ },
+ }
+ for _, c := range equivalentCases {
+ if !Equiv(c.f.f, c.g.f) {
+ t.Error("expected equivalence. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+
+ differentCases := []struct{ f, g fun }{
+ // different shape
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem"))),
+ },
+ // value order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value auxint different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value aux different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, 14),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, 26),
+ Exit("mem"))),
+ },
+ // value args different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "b", "a"),
+ Exit("mem"))),
+ },
+ }
+ for _, c := range differentCases {
+ if Equiv(c.f.f, c.g.f) {
+ t.Error("expected difference. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+}
+
+// TestConstCache ensures that the cache will not return
+// reused freed values with a non-matching AuxInt.
+func TestConstCache(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+ v1 := f.f.ConstBool(c.config.Types.Bool, false)
+ v2 := f.f.ConstBool(c.config.Types.Bool, true)
+ f.f.freeValue(v1)
+ f.f.freeValue(v2)
+ v3 := f.f.ConstBool(c.config.Types.Bool, false)
+ v4 := f.f.ConstBool(c.config.Types.Bool, true)
+ if v3.AuxInt != 0 {
+ t.Errorf("expected %s to have auxint of 0\n", v3.LongString())
+ }
+ if v4.AuxInt != 1 {
+ t.Errorf("expected %s to have auxint of 1\n", v4.LongString())
+ }
+
+}
+
+// opcodeMap returns a map from opcode to the number of times that opcode
+// appears in the function.
+func opcodeMap(f *Func) map[Op]int {
+ m := map[Op]int{}
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ m[v.Op]++
+ }
+ }
+ return m
+}
+
+// checkOpcodeCounts checks that the number of opcodes listed in m agrees with the
+// number of opcodes that appear in the function.
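+//
+// For example, a test might assert the op mix after running a pass
+// (sketch only; the ops and counts here are illustrative):
+//
+//	checkOpcodeCounts(t, fun.f, map[Op]int{OpPhi: 1, OpAdd64: 2})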
+func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) {
+ n := opcodeMap(f)
+ for op, cnt := range m {
+ if n[op] != cnt {
+ t.Errorf("%s appears %d times, want %d times", op, n[op], cnt)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
new file mode 100644
index 0000000..c51461c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -0,0 +1,243 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+)
+
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
+
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) }
+
+type fuseType uint8
+
+const (
+ fuseTypePlain fuseType = 1 << iota
+ fuseTypeIf
+ fuseTypeIntInRange
+ fuseTypeShortCircuit
+)
+
+// fuse simplifies control flow by joining basic blocks.
+func fuse(f *Func, typ fuseType) {
+ for changed := true; changed; {
+ changed = false
+ // Fuse from end to beginning, to avoid quadratic behavior in fuseBlockPlain. See issue 13554.
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ b := f.Blocks[i]
+ if typ&fuseTypeIf != 0 {
+ changed = fuseBlockIf(b) || changed
+ }
+ if typ&fuseTypeIntInRange != 0 {
+ changed = fuseIntegerComparisons(b) || changed
+ }
+ if typ&fuseTypePlain != 0 {
+ changed = fuseBlockPlain(b) || changed
+ }
+ if typ&fuseTypeShortCircuit != 0 {
+ changed = shortcircuitBlock(b) || changed
+ }
+ }
+ if changed {
+ f.invalidateCFG()
+ }
+ }
+}
+
+// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
+//
+// b b b b
+// / \ | \ / | | |
+// s0 s1 | s1 s0 | | |
+// \ / | / \ | | |
+// ss ss ss ss
+//
+// If all Phi ops in ss have identical variables for slots corresponding to
+// s0, s1 and b then the branch can be dropped.
+// This optimization often comes up in switch statements with multiple
+// expressions in a case clause:
+// switch n {
+// case 1,2,3: return 4
+// }
+// TODO: If ss doesn't contain any OpPhis, aren't s0 and s1 dead code anyway?
+func fuseBlockIf(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+
+ var ss0, ss1 *Block
+ s0 := b.Succs[0].b
+ i0 := b.Succs[0].i
+ if s0.Kind != BlockPlain || len(s0.Preds) != 1 || !isEmpty(s0) {
+ s0, ss0 = b, s0
+ } else {
+ ss0 = s0.Succs[0].b
+ i0 = s0.Succs[0].i
+ }
+ s1 := b.Succs[1].b
+ i1 := b.Succs[1].i
+ if s1.Kind != BlockPlain || len(s1.Preds) != 1 || !isEmpty(s1) {
+ s1, ss1 = b, s1
+ } else {
+ ss1 = s1.Succs[0].b
+ i1 = s1.Succs[0].i
+ }
+
+ if ss0 != ss1 {
+ return false
+ }
+ ss := ss0
+
+ // s0 and s1 are set equal to b if the corresponding block is missing
+ // (2nd, 3rd and 4th case in the figure).
+
+ for _, v := range ss.Values {
+ if v.Op == OpPhi && v.Uses > 0 && v.Args[i0] != v.Args[i1] {
+ return false
+ }
+ }
+
+ // Now we have two of the following edges: b->ss, b->s0->ss and b->s1->ss,
+ // with s0 and s1 empty if they exist.
+ // We can safely replace them with a single b->ss edge because all OpPhis in ss
+ // have identical arguments for the slots involved (verified above).
+ // No critical edge is introduced because b will have one successor.
+ if s0 != b && s1 != b {
+ // Replace edge b->s0->ss with b->ss.
+ // We need to keep a slot for Phis corresponding to b.
+ b.Succs[0] = Edge{ss, i0}
+ ss.Preds[i0] = Edge{b, 0}
+ b.removeEdge(1)
+ s1.removeEdge(0)
+ } else if s0 != b {
+ b.removeEdge(0)
+ s0.removeEdge(0)
+ } else if s1 != b {
+ b.removeEdge(1)
+ s1.removeEdge(0)
+ } else {
+ b.removeEdge(1)
+ }
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ b.ResetControls()
+
+ // Trash the empty blocks s0 and s1.
+ blocks := [...]*Block{s0, s1}
+ for _, s := range &blocks {
+ if s == b {
+ continue
+ }
+ // Move any (dead) values in s0 or s1 to b,
+ // where they will be eliminated by the next deadcode pass.
+ for _, v := range s.Values {
+ v.Block = b
+ }
+ b.Values = append(b.Values, s.Values...)
+ // Clear s.
+ s.Kind = BlockInvalid
+ s.Values = nil
+ s.Succs = nil
+ s.Preds = nil
+ }
+ return true
+}
+
+// isEmpty reports whether b contains no live values.
+// It is conservative: it may return false for blocks whose values are in fact dead.
+func isEmpty(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
+ return false
+ }
+ }
+ return true
+}
+
+func fuseBlockPlain(b *Block) bool {
+ if b.Kind != BlockPlain {
+ return false
+ }
+
+ c := b.Succs[0].b
+ if len(c.Preds) != 1 {
+ return false
+ }
+
+ // If a block happened to end in a statement marker,
+ // try to preserve it.
+ if b.Pos.IsStmt() == src.PosIsStmt {
+ l := b.Pos.Line()
+ for _, v := range c.Values {
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ if l == v.Pos.Line() {
+ v.Pos = v.Pos.WithIsStmt()
+ l = 0
+ break
+ }
+ }
+ if l != 0 && c.Pos.Line() == l {
+ c.Pos = c.Pos.WithIsStmt()
+ }
+ }
+
+ // move all of b's values to c.
+ for _, v := range b.Values {
+ v.Block = c
+ }
+ // Use whichever value slice is larger, in the hopes of avoiding growth.
+ // However, take care to avoid c.Values pointing to b.valstorage.
+ // See golang.org/issue/18602.
+ // It's important to keep the elements in the same order; maintenance of
+ // debugging information depends on the order of *Values in Blocks.
+ // This can also cause changes in the order (which may affect other
+ // optimizations and possibly compiler output) for 32-vs-64 bit compilation
+ // platforms (word size affects allocation bucket size affects slice capacity).
+ if cap(c.Values) >= cap(b.Values) || len(b.Values) <= len(b.valstorage) {
+ bl := len(b.Values)
+ cl := len(c.Values)
+ var t []*Value // construct t = b.Values followed-by c.Values, but with attention to allocation.
+ if cap(c.Values) < bl+cl {
+ // reallocate
+ t = make([]*Value, bl+cl)
+ } else {
+ // in place.
+ t = c.Values[0 : bl+cl]
+ }
+ copy(t[bl:], c.Values) // possibly in-place
+ c.Values = t
+ copy(c.Values, b.Values)
+ } else {
+ c.Values = append(b.Values, c.Values...)
+ }
+
+ // replace b->c edge with preds(b) -> c
+ c.predstorage[0] = Edge{}
+ if len(b.Preds) > len(b.predstorage) {
+ c.Preds = b.Preds
+ } else {
+ c.Preds = append(c.predstorage[:0], b.Preds...)
+ }
+ for i, e := range c.Preds {
+ p := e.b
+ p.Succs[e.i] = Edge{c, i}
+ }
+ f := b.Func
+ if f.Entry == b {
+ f.Entry = c
+ }
+
+ // trash b, just in case
+ b.Kind = BlockInvalid
+ b.Values = nil
+ b.Preds = nil
+ b.Succs = nil
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000..d843fc3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can later be reduced to 'unsigned(x-1) < 4'.
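+// (A quick sanity check of that rewrite: for x in 1..4 the subtraction gives
+// 0..3, which is < 4; for x <= 0 the unsigned result wraps to a large value,
+// so the combined test correctly fails.)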
+//
+// Look for branch structure like:
+//
+// p
+// |\
+// | b
+// |/ \
+// s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+// p
+// \
+// b
+// / \
+// s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
+func fuseIntegerComparisons(b *Block) bool {
+ if len(b.Preds) != 1 {
+ return false
+ }
+ p := b.Preds[0].Block()
+ if b.Kind != BlockIf || p.Kind != BlockIf {
+ return false
+ }
+
+ // Don't merge control values if b is likely to be bypassed anyway.
+ if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+ return false
+ }
+ if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+ return false
+ }
+
+ // Check if the control values combine to make an integer inequality that
+ // can be further optimized later.
+ bc := b.Controls[0]
+ pc := p.Controls[0]
+ if !areMergeableInequalities(bc, pc) {
+ return false
+ }
+
+ // If the first (true) successors match then we have a disjunction (||).
+ // If the second (false) successors match then we have a conjunction (&&).
+ for i, op := range [2]Op{OpOrB, OpAndB} {
+ if p.Succs[i].Block() != b.Succs[i].Block() {
+ continue
+ }
+
+ // TODO(mundaym): should we also check the cost of executing b?
+ // Currently we might speculatively execute b even if b contains
+ // a lot of instructions. We could just check that len(b.Values)
+ // is lower than a fixed amount. Bear in mind however that the
+ // other optimization passes might yet reduce the cost of b
+ // significantly so we shouldn't be overly conservative.
+ if !canSpeculativelyExecute(b) {
+ return false
+ }
+
+ // Logically combine the control values for p and b.
+ v := b.NewValue0(bc.Pos, op, bc.Type)
+ v.AddArg(pc)
+ v.AddArg(bc)
+
+ // Set the combined control value as the control value for b.
+ b.SetControl(v)
+
+ // Modify p so that it jumps directly to b.
+ p.removeEdge(i)
+ p.Kind = BlockPlain
+ p.Likely = BranchUnknown
+ p.ResetControls()
+
+ return true
+ }
+
+ // TODO: could negate condition(s) to merge controls.
+ return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+ for i, a := range v.Args {
+ switch a.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ return i
+ }
+ }
+ return -1
+}
+
+// isSignedInequality reports whether v represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+ return true
+ }
+ return false
+}
+
+// isUnsignedInequality reports whether v represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+ return true
+ }
+ return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+ // We need both inequalities to be either in the signed or unsigned domain.
+ // TODO(mundaym): it would also be good to merge when we have an Eq op that
+ // could be transformed into a Less/Leq. For example in the unsigned
+ // domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+ inequalityChecks := [...]func(*Value) bool{
+ isSignedInequality,
+ isUnsignedInequality,
+ }
+ for _, f := range inequalityChecks {
+ if !f(x) || !f(y) {
+ continue
+ }
+
+ // Check that both inequalities are comparisons with constants.
+ xi := getConstIntArgIndex(x)
+ if xi < 0 {
+ return false
+ }
+ yi := getConstIntArgIndex(y)
+ if yi < 0 {
+ return false
+ }
+
+ // Check that the non-constant arguments to the inequalities
+ // are the same.
+ return x.Args[xi^1] == y.Args[yi^1]
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
new file mode 100644
index 0000000..1519099
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -0,0 +1,203 @@
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestFuseEliminatesOneBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "exit")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesBothBranches(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseHandlesPhis(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr1"),
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesEmptyBlocks(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("z0")),
+ Bloc("z1",
+ Goto("z2")),
+ Bloc("z3",
+ Goto("exit")),
+ Bloc("z2",
+ Goto("z3")),
+ Bloc("z0",
+ Goto("z1")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("%s was not eliminated, but should have", k)
+ }
+ }
+}
+
+func TestFuseSideEffects(t *testing.T) {
+ // Test that we don't fuse branches whose values have side effects but
+ // no uses (e.g. when they are followed by an infinite loop).
+ // See issue #36005.
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpArg, c.config.Types.Bool, 0, nil),
+ If("b", "then", "else")),
+ Bloc("then",
+ Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("else",
+ Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("empty",
+ Goto("loop")),
+ Bloc("loop",
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind == BlockInvalid {
+ t.Errorf("then is eliminated, but should not")
+ }
+ if b == fun.blocks["else"] && b.Kind == BlockInvalid {
+ t.Errorf("else is eliminated, but should not")
+ }
+ }
+}
+
+func BenchmarkFuse(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+
+ blocks := make([]bloc, 0, 2*n+3)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("exit")))
+
+ phiArgs := make([]string, 0, 2*n)
+ for i := 0; i < n; i++ {
+ cname := fmt.Sprintf("c%d", i)
+ blocks = append(blocks,
+ Bloc(fmt.Sprintf("b%d", i), If("cond", cname, "merge")),
+ Bloc(cname, Goto("merge")))
+ phiArgs = append(phiArgs, "x", "x")
+ }
+ blocks = append(blocks,
+ Bloc("merge",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, phiArgs...),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ fuseLate(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
new file mode 100644
index 0000000..fbc12fd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -0,0 +1,1111 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|32|16|8) ...) => (ADDL ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+(Add32carry ...) => (ADDLcarry ...)
+(Add32withcarry ...) => (ADCL ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUBL ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+(Sub32carry ...) => (SUBLcarry ...)
+(Sub32withcarry ...) => (SBBL ...)
+
+(Mul(32|16|8) ...) => (MULL ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+(Mul32uhilo ...) => (MULLQU ...)
+
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul32uover x y)) => (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+
+(Avg32u ...) => (AVGLU ...)
+
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+(Div(32|32u|16|16u) ...) => (DIV(L|LU|W|WU) ...)
+(Div8 x y) => (DIVW (SignExt8to16 x) (SignExt8to16 y))
+(Div8u x y) => (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(Hmul(32|32u) ...) => (HMUL(L|LU) ...)
+
+(Mod(32|32u|16|16u) ...) => (MOD(L|LU|W|WU) ...)
+(Mod8 x y) => (MODW (SignExt8to16 x) (SignExt8to16 y))
+(Mod8u x y) => (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(And(32|16|8) ...) => (ANDL ...)
+(Or(32|16|8) ...) => (ORL ...)
+(Xor(32|16|8) ...) => (XORL ...)
+
+(Neg(32|16|8) ...) => (NEGL ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+(Com(32|16|8) ...) => (NOTL ...)
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) => (ADDLconst [int32(off)] ptr)
+
+(Bswap32 ...) => (BSWAPL ...)
+
+(Sqrt ...) => (SQRTSD ...)
+
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+(Ctz16NonZero ...) => (BSFL ...)
+
+// Lowering extension
+(SignExt8to16 ...) => (MOVBLSX ...)
+(SignExt8to32 ...) => (MOVBLSX ...)
+(SignExt16to32 ...) => (MOVWLSX ...)
+
+(ZeroExt8to16 ...) => (MOVBLZX ...)
+(ZeroExt8to32 ...) => (MOVBLZX ...)
+(ZeroExt16to32 ...) => (MOVWLZX ...)
+
+(Signmask x) => (SARLconst x [31])
+(Zeromask <t> x) => (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+(Slicemask <t> x) => (SARLconst (NEGL <t> x) [31])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Lowering float-int conversions
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffff)
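+//
+// A sketch of how the rules below arrange this: CMP(L|W|B)const y [k] sets the
+// carry flag exactly when y < k (unsigned), SBBLcarrymask then materializes -1
+// (all ones) on carry and 0 otherwise, and ANDing that mask with the shifted
+// value passes in-range shifts through while forcing out-of-range shifts to 0.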
+(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+
+(Lsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+
+(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [16])))
+(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [8])))
+
+(Rsh32Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRL <t> x y)
+(Rsh16Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRW <t> x y)
+(Rsh8Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRB <t> x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
+
+(Rsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [32])))))
+(Rsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [16])))))
+(Rsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [8])))))
+
+(Rsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARB x y)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SHLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SARLconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SHRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SHLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SARWconst x [int16(c)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SHRWconst x [int16(c)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SHLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SARBconst x [int8(c)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SHRBconst x [int8(c)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SARLconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7])
+
+// constant rotates
+(RotateLeft32 x (MOVLconst [c])) => (ROLLconst [c&31] x)
+(RotateLeft16 x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x)
+(RotateLeft8 x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x)
+
+// Lowering comparisons
+(Less32 x y) => (SETL (CMPL x y))
+(Less16 x y) => (SETL (CMPW x y))
+(Less8 x y) => (SETL (CMPB x y))
+(Less32U x y) => (SETB (CMPL x y))
+(Less16U x y) => (SETB (CMPW x y))
+(Less8U x y) => (SETB (CMPB x y))
+// Use SETGF with reversed operands to dodge NaN case
+(Less64F x y) => (SETGF (UCOMISD y x))
+(Less32F x y) => (SETGF (UCOMISS y x))
+
+(Leq32 x y) => (SETLE (CMPL x y))
+(Leq16 x y) => (SETLE (CMPW x y))
+(Leq8 x y) => (SETLE (CMPB x y))
+(Leq32U x y) => (SETBE (CMPL x y))
+(Leq16U x y) => (SETBE (CMPW x y))
+(Leq8U x y) => (SETBE (CMPB x y))
+// Use SETGEF with reversed operands to dodge NaN case
+(Leq64F x y) => (SETGEF (UCOMISD y x))
+(Leq32F x y) => (SETGEF (UCOMISS y x))
+
+(Eq32 x y) => (SETEQ (CMPL x y))
+(Eq16 x y) => (SETEQ (CMPW x y))
+(Eq8 x y) => (SETEQ (CMPB x y))
+(EqB x y) => (SETEQ (CMPB x y))
+(EqPtr x y) => (SETEQ (CMPL x y))
+(Eq64F x y) => (SETEQF (UCOMISD x y))
+(Eq32F x y) => (SETEQF (UCOMISS x y))
+
+(Neq32 x y) => (SETNE (CMPL x y))
+(Neq16 x y) => (SETNE (CMPW x y))
+(Neq8 x y) => (SETNE (CMPB x y))
+(NeqB x y) => (SETNE (CMPB x y))
+(NeqPtr x y) => (SETNE (CMPL x y))
+(Neq64F x y) => (SETNEF (UCOMISD x y))
+(Neq32F x y) => (SETNEF (UCOMISS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of the Store pattern should come first.
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [8] dst src mem) =>
+ (MOVLstore [4] dst (MOVLload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Adjust moves to be a multiple of 4 bytes.
+(Move [s] dst src mem)
+ && s > 8 && s%4 != 0 =>
+ (Move [s-s%4]
+ (ADDLconst <dst.Type> dst [int32(s%4)])
+ (ADDLconst <src.Type> src [int32(s%4)])
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Medium copying uses Duff's device.
+(Move [s] dst src mem)
+ && s > 8 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [10*(128-s/4)] dst src mem)
+// 10 and 128 are magic constants. 10 is the number of bytes to encode:
+// MOVL (SI), CX
+// ADDL $4, SI
+// MOVL CX, (DI)
+// ADDL $4, DI
+// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
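+// For example, for s = 64 the rule computes AuxInt = 10*(128-64/4) = 1120,
+// so the call enters duffcopy 1120 bytes past its start and only the final
+// 16 copy blocks run, moving 16*4 = 64 bytes.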
+
+// Large copying uses REP MOVSL.
+(Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%4 != 0 && s > 4 =>
+ (Zero [s-s%4] (ADDLconst destptr [int32(s%4)])
+ (MOVLstoreconst [0] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [8] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [12] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)))
+(Zero [16] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,12)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))))
+
+// Medium zeroing uses Duff's device.
+(Zero [s] destptr mem)
+ && s > 16 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice =>
+ (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL.
+// 128 is the number of STOSL instructions in duffzero.
+// See src/runtime/duff_386.s:duffzero.
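+// For example, for s = 64 the AuxInt is 1*(128-64/4) = 112, so the call
+// enters duffzero 112 bytes in and only the final 16 STOSL instructions run,
+// zeroing 16*4 = 64 bytes.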
+
+// Large zeroing uses REP STOSL.
+(Zero [s] destptr mem)
+ && (s > 4*128 || (config.noDuffDevice && s > 16))
+ && s%4 == 0 =>
+ (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const(32|64)F ...) => (MOVS(S|D)const ...)
+(ConstNil) => (MOVLconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTL p p))
+(IsInBounds idx len) => (SETB (CMPL idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPL idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LEAL {sym} base)
+(LocalAddr {sym} base _) => (LEAL {sym} base)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// fold constants into instructions
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
+(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
+
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+(SUBLcarry x (MOVLconst [c])) => (SUBLconstcarry [c] x)
+(SBBL x (MOVLconst [c]) f) => (SBBLconst [c] x f)
+
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(ANDLconst [c] (ANDLconst [d] x)) => (ANDLconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) => (XORLconst [c ^ d] x)
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLL x (MOVLconst [c])) => (SHLLconst [c&31] x)
+(SHRL x (MOVLconst [c])) => (SHRLconst [c&31] x)
+(SHRW x (MOVLconst [c])) && c&31 < 16 => (SHRWconst [int16(c&31)] x)
+(SHRW _ (MOVLconst [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOVLconst [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOVLconst [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARL x (MOVLconst [c])) => (SARLconst [c&31] x)
+(SARW x (MOVLconst [c])) => (SARWconst [int16(min(int64(c&31),15))] x)
+(SARB x (MOVLconst [c])) => (SARBconst [int8(min(int64(c&31),7))] x)
+
+(SARL x (ANDLconst [31] y)) => (SARL x y)
+(SHLL x (ANDLconst [31] y)) => (SHLL x y)
+(SHRL x (ANDLconst [31] y)) => (SHRL x y)
+
+// Rotate instructions
+
+(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+
+(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+
+(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+
+(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
+
+
+// Constant shift simplifications
+
+(SHLLconst x [0]) => x
+(SHRLconst x [0]) => x
+(SARLconst x [0]) => x
+
+(SHRWconst x [0]) => x
+(SARWconst x [0]) => x
+
+(SHRBconst x [0]) => x
+(SARBconst x [0]) => x
+
+(ROLLconst [0] x) => x
+(ROLWconst [0] x) => x
+(ROLBconst [0] x) => x
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24]))), but just in case.
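+// For example, the SHRW rules above already cover that case: with c = 24,
+// c&31 is 24 >= 16, so (SHRW x (MOVLconst [24])) rewrites to (MOVLconst [0]),
+// matching the hardware result of shifting a 16-bit value right by 24.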
+
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(L|W|B) y x))
+
+// strength reduction
+// Assumes the following costs, taken from https://gmplib.org/~tege/x86-timing.pdf:
+// 1 - addl, shll, leal, negl, subl
+// 3 - imull
+// This limits the rewrites to two instructions.
+// Note that negl always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
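+// For example, the [27] rule below needs only two LEALs:
+// LEAL2 x x computes x + 2*x = 3*x, and LEAL8 of that pair computes
+// 3*x + 8*(3*x) = 27*x, for a total cost of 2 versus 3 for an imull.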
+(MULLconst [-9] x) => (NEGL (LEAL8 <v.Type> x x))
+(MULLconst [-5] x) => (NEGL (LEAL4 <v.Type> x x))
+(MULLconst [-3] x) => (NEGL (LEAL2 <v.Type> x x))
+(MULLconst [-1] x) => (NEGL x)
+(MULLconst [0] _) => (MOVLconst [0])
+(MULLconst [1] x) => x
+(MULLconst [3] x) => (LEAL2 x x)
+(MULLconst [5] x) => (LEAL4 x x)
+(MULLconst [7] x) => (LEAL2 x (LEAL2 <v.Type> x x))
+(MULLconst [9] x) => (LEAL8 x x)
+(MULLconst [11] x) => (LEAL2 x (LEAL4 <v.Type> x x))
+(MULLconst [13] x) => (LEAL4 x (LEAL2 <v.Type> x x))
+(MULLconst [19] x) => (LEAL2 x (LEAL8 <v.Type> x x))
+(MULLconst [21] x) => (LEAL4 x (LEAL4 <v.Type> x x))
+(MULLconst [25] x) => (LEAL8 x (LEAL2 <v.Type> x x))
+(MULLconst [27] x) => (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+(MULLconst [37] x) => (LEAL4 x (LEAL8 <v.Type> x x))
+(MULLconst [41] x) => (LEAL8 x (LEAL4 <v.Type> x x))
+(MULLconst [45] x) => (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+(MULLconst [73] x) => (LEAL8 x (LEAL8 <v.Type> x x))
+(MULLconst [81] x) => (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+
+(MULLconst [c] x) && isPowerOfTwo32(c+1) && c >= 15 => (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+
+// combine add/shift into LEAL
+(ADDL x (SHLLconst [3] y)) => (LEAL8 x y)
+(ADDL x (SHLLconst [2] y)) => (LEAL4 x y)
+(ADDL x (SHLLconst [1] y)) => (LEAL2 x y)
+(ADDL x (ADDL y y)) => (LEAL2 x y)
+(ADDL x (ADDL x y)) => (LEAL2 y x)
+
+// combine ADDL/ADDLconst into LEAL1
+(ADDLconst [c] (ADDL x y)) => (LEAL1 [c] x y)
+(ADDL (ADDLconst [c] x) y) => (LEAL1 [c] x y)
+
+// fold ADDL into LEAL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(ADDLconst [c] x:(SP)) => (LEAL [c] x) // so it is rematerializeable
+(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+
+// fold ADDLconst into LEALx
+(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL1 [c+d] {s} x y)
+(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL2 [c+d] {s} x y)
+(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL4 [c+d] {s} x y)
+(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL8 [c+d] {s} x y)
+(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL1 [c+d] {s} x y)
+(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL2 [c+d] {s} x y)
+(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEAL2 [c+2*d] {s} x y)
+(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL4 [c+d] {s} x y)
+(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEAL4 [c+4*d] {s} x y)
+(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL8 [c+d] {s} x y)
+(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEAL8 [c+8*d] {s} x y)
+
+// fold shifts into LEALx
+(LEAL1 [c] {s} x (SHLLconst [1] y)) => (LEAL2 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [2] y)) => (LEAL4 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [3] y)) => (LEAL8 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [1] y)) => (LEAL4 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [2] y)) => (LEAL8 [c] {s} x y)
+(LEAL4 [c] {s} x (SHLLconst [1] y)) => (LEAL8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLSX x)
+(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLSX x)
+
+// Fold extensions and ANDs together.
+(MOVBLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
+// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && valoff1.canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+
+// Fold constants into stores.
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(int64(off)) =>
+ (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+// Note: we turn off this merging for operations on globals when building
+// position-independent code (when Flag_shared is set).
+// PIC needs a spare register to load the PC into. Having the LEAL be
+// a separate instruction gives us that register. Having the LEAL be
+// a separate instruction also allows it to be CSEd (which is good because
+// it compiles to a thunk call).
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ && valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+// Merge load/store to op
+((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off)) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+
+// fold LEALs together
+(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAL into LEAL1
+(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL1 into LEAL
+(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL into LEAL[248]
+(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[248] into LEAL
+(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
+ (LEAL4 [off1+2*off2] {sym} x y)
+(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
+ (LEAL8 [off1+4*off2] {sym} x y)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT_ULT) yes no) => (First no yes)
+(EQ (FlagLT_UGT) yes no) => (First no yes)
+(EQ (FlagGT_ULT) yes no) => (First no yes)
+(EQ (FlagGT_UGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT_ULT) yes no) => (First yes no)
+(NE (FlagLT_UGT) yes no) => (First yes no)
+(NE (FlagGT_ULT) yes no) => (First yes no)
+(NE (FlagGT_UGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT_ULT) yes no) => (First yes no)
+(LT (FlagLT_UGT) yes no) => (First yes no)
+(LT (FlagGT_ULT) yes no) => (First no yes)
+(LT (FlagGT_UGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT_ULT) yes no) => (First yes no)
+(LE (FlagLT_UGT) yes no) => (First yes no)
+(LE (FlagGT_ULT) yes no) => (First no yes)
+(LE (FlagGT_UGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT_ULT) yes no) => (First no yes)
+(GT (FlagLT_UGT) yes no) => (First no yes)
+(GT (FlagGT_ULT) yes no) => (First yes no)
+(GT (FlagGT_UGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT_ULT) yes no) => (First no yes)
+(GE (FlagLT_UGT) yes no) => (First no yes)
+(GE (FlagGT_ULT) yes no) => (First yes no)
+(GE (FlagGT_UGT) yes no) => (First yes no)
+
+(ULT (FlagEQ) yes no) => (First no yes)
+(ULT (FlagLT_ULT) yes no) => (First yes no)
+(ULT (FlagLT_UGT) yes no) => (First no yes)
+(ULT (FlagGT_ULT) yes no) => (First yes no)
+(ULT (FlagGT_UGT) yes no) => (First no yes)
+
+(ULE (FlagEQ) yes no) => (First yes no)
+(ULE (FlagLT_ULT) yes no) => (First yes no)
+(ULE (FlagLT_UGT) yes no) => (First no yes)
+(ULE (FlagGT_ULT) yes no) => (First yes no)
+(ULE (FlagGT_UGT) yes no) => (First no yes)
+
+(UGT (FlagEQ) yes no) => (First no yes)
+(UGT (FlagLT_ULT) yes no) => (First no yes)
+(UGT (FlagLT_UGT) yes no) => (First yes no)
+(UGT (FlagGT_ULT) yes no) => (First no yes)
+(UGT (FlagGT_UGT) yes no) => (First yes no)
+
+(UGE (FlagEQ) yes no) => (First yes no)
+(UGE (FlagLT_ULT) yes no) => (First no yes)
+(UGE (FlagLT_UGT) yes no) => (First yes no)
+(UGE (FlagGT_ULT) yes no) => (First no yes)
+(UGE (FlagGT_UGT) yes no) => (First yes no)
+
+// Absorb flag constants into SETxx ops.
+(SETEQ (FlagEQ)) => (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) => (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETNE (FlagEQ)) => (MOVLconst [0])
+(SETNE (FlagLT_ULT)) => (MOVLconst [1])
+(SETNE (FlagLT_UGT)) => (MOVLconst [1])
+(SETNE (FlagGT_ULT)) => (MOVLconst [1])
+(SETNE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETL (FlagEQ)) => (MOVLconst [0])
+(SETL (FlagLT_ULT)) => (MOVLconst [1])
+(SETL (FlagLT_UGT)) => (MOVLconst [1])
+(SETL (FlagGT_ULT)) => (MOVLconst [0])
+(SETL (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETLE (FlagEQ)) => (MOVLconst [1])
+(SETLE (FlagLT_ULT)) => (MOVLconst [1])
+(SETLE (FlagLT_UGT)) => (MOVLconst [1])
+(SETLE (FlagGT_ULT)) => (MOVLconst [0])
+(SETLE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETG (FlagEQ)) => (MOVLconst [0])
+(SETG (FlagLT_ULT)) => (MOVLconst [0])
+(SETG (FlagLT_UGT)) => (MOVLconst [0])
+(SETG (FlagGT_ULT)) => (MOVLconst [1])
+(SETG (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETGE (FlagEQ)) => (MOVLconst [1])
+(SETGE (FlagLT_ULT)) => (MOVLconst [0])
+(SETGE (FlagLT_UGT)) => (MOVLconst [0])
+(SETGE (FlagGT_ULT)) => (MOVLconst [1])
+(SETGE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETB (FlagEQ)) => (MOVLconst [0])
+(SETB (FlagLT_ULT)) => (MOVLconst [1])
+(SETB (FlagLT_UGT)) => (MOVLconst [0])
+(SETB (FlagGT_ULT)) => (MOVLconst [1])
+(SETB (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETBE (FlagEQ)) => (MOVLconst [1])
+(SETBE (FlagLT_ULT)) => (MOVLconst [1])
+(SETBE (FlagLT_UGT)) => (MOVLconst [0])
+(SETBE (FlagGT_ULT)) => (MOVLconst [1])
+(SETBE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETA (FlagEQ)) => (MOVLconst [0])
+(SETA (FlagLT_ULT)) => (MOVLconst [0])
+(SETA (FlagLT_UGT)) => (MOVLconst [1])
+(SETA (FlagGT_ULT)) => (MOVLconst [0])
+(SETA (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETAE (FlagEQ)) => (MOVLconst [1])
+(SETAE (FlagLT_ULT)) => (MOVLconst [0])
+(SETAE (FlagLT_UGT)) => (MOVLconst [1])
+(SETAE (FlagGT_ULT)) => (MOVLconst [0])
+(SETAE (FlagGT_UGT)) => (MOVLconst [1])
+
+// Remove redundant *const ops
+(ADDLconst [c] x) && c==0 => x
+(SUBLconst [c] x) && c==0 => x
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDLconst [c] x) && c==-1 => x
+(ORLconst [c] x) && c==0 => x
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 => (MOVLconst [0])
+
+// Convert constant subtracts to constant adds
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SARLconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARWconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARBconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBL x x) => (MOVLconst [0])
+(ANDL x x) => x
+(ORL x x) => x
+(XORL x x) => (MOVLconst [0])
+
+// checking AND against 0.
+(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 => (TEST(L|W|B) x y)
+(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTLconst [c] x)
+(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTWconst [int16(c)] x)
+(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMP(L|W|B)const x [0]) => (TEST(L|W|B) x x)
+
+// Convert LEAL1 back to ADDL if we can
+(LEAL1 [0] {nil} x y) => (ADDL x y)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
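+// Roughly, the Go-level shape being matched is (for a byte slice b):
+//   uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+// which is what binary.LittleEndian.Uint32 expands to; the rules below fuse
+// the individual byte loads into a single wider MOVWload/MOVLload.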
+(ORL x0:(MOVBload [i0] {s} p mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, s0)
+ => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+
+(ORL x0:(MOVBload [i] {s} p0 mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, s0)
+ => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+
+(ORL o0:(ORL
+ x0:(MOVWload [i0] {s} p mem)
+ s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))
+ s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
+
+(ORL o0:(ORL
+ x0:(MOVWload [i] {s} p0 mem)
+ s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem)))
+ s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && o0.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && sequentialAddresses(p1, p2, 1)
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
+
+// Combine constant stores into larger (unaligned) stores.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
+
+(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+  && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+
+(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+
+// Combine stores into larger (unaligned) stores.
+(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHR(W|L)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i] {s} p w mem)
+(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w0 mem)
+
+(MOVBstore [i] {s} p1 (SHR(W|L)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHR(W|L)const [8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w0 mem)
+
+(MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w0 mem)
+
+(MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w mem)
+(MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w0 mem)
+
+// For PIC, break floating-point constant loading into two instructions so we have
+// a register to use for holding the address of the constant pool entry.
+(MOVSSconst [c]) && config.ctxt.Flag_shared => (MOVSSconst2 (MOVSSconst1 [c]))
+(MOVSDconst [c]) && config.ctxt.Flag_shared => (MOVSDconst2 (MOVSDconst1 [c]))
+
+(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(L|W|B)load {sym} [off] ptr x mem)
+(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && validValAndOff(int64(c), int64(off))
+ && clobber(l) =>
+ @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem)
+
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
new file mode 100644
index 0000000..737b99c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -0,0 +1,585 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+
+// Suffixes encode the bit width of various instructions.
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../x86/reg.go
+var regNames386 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNames386) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNames386 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
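+	// For example, buildReg("AX CX") returns 1<<0 | 1<<1 == 3, since AX and CX
+	// are entries 0 and 1 of regNames386.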
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ si = buildReg("SI")
+ gp = buildReg("AX CX DX BX BP SI DI")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ callerSave = gp | fp
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
+ gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
+
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+ )
+
+ var _386ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true, usesScratch: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true, usesScratch: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true, usesScratch: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true, usesScratch: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
+
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary ops
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint
+
+ {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates <carry,result> pair
+ {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates <carry,result> pair
+ {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
+ {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
+
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "SUBLcarry", argLength: 2, reg: gp21carry, asm: "SUBL", resultInArg0: true}, // arg0-arg1, generates <borrow,result> pair
+ {name: "SUBLconstcarry", argLength: 1, reg: gp11carry, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0-auxint, generates <borrow,result> pair
+ {name: "SBBL", argLength: 3, reg: gp2carry1, asm: "SBBL", resultInArg0: true, clobberFlags: true}, // arg0-arg1-borrow(arg2), where arg2 is flags
+ {name: "SBBLconst", argLength: 2, reg: gp1carry1, asm: "SBBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0-auxint-borrow(arg1), where arg1 is flags
+
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+
+ {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
+
+ {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
+
+ // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
+
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f64
+
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+ // Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
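+ // E.g. SHRW with a variable count of 17 really shifts by 17 (yielding 0); the
+ // hardware does not reduce the count mod 16 and shift by 1.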
+
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7
+
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7
+
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ // binary-op with a memory source operand
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary-op with an indexed memory source operand
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+
+ // unary ops
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true, clobberFlags: true}, // ^arg0
+
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
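+ // SBBLcarrymask is typically fed by a compare: e.g. (SBBLcarrymask (CMPLconst y [32]))
+ // is -1 when y < 32 (unsigned) and 0 otherwise, which the shift lowering rules AND
+ // with the shifted value to zero out-of-range shifts.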
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
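+ // E.g. with a NaN operand both x < y and x >= y must evaluate to false, so
+ // rewriting one condition as the negation of the other (as the integer rules
+ // do) would give the wrong answer here.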
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBLSX", argLength: 1, reg: gp11, asm: "MOVBLSX"}, // sign extend arg0 from int8 to int32
+ {name: "MOVBLZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int32
+ {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32
+ {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL", usesScratch: true}, // convert float64 to int32
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL", usesScratch: true}, // convert float32 to int32
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS", usesScratch: true}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD", usesScratch: true}, // convert int32 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS", usesScratch: true}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAL{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // direct binary-op on memory (read-modify-write)
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+
+ // direct binary-op on indexed memory (read-modify-write)
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem
+
+ // direct binary-op on memory with a constant (read-modify-write)
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ // direct binary-op on indexed memory with a constant (read-modify-write)
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ // TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
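+ // E.g. a MOVLstoreconst whose AuxInt was built with makeValAndOff32(7, 12)
+ // stores the 32-bit constant 7 to arg0+12+aux (Val occupies the high 32 bits
+ // of AuxInt, Off the low 32 bits).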
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ...
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = value to store (will always be zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ // Note: CX is only clobbered when dynamic linking.
+ },
+ faultOnNilArg0: true,
+ },
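+ // (The Zero lowering rules pick auxint so that jumping that far into
+ // runtime.duffzero zeroes exactly the requested amount of memory.)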
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 4-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
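+ // Assembles to REP; STOSL, which stores AX to (DI) CX times, advancing DI by 4
+ // each iteration, so it zeroes 4*CX bytes in total.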
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = offset from duffcopy symbol to call
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI CX"), // uses CX as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 4-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
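+ // Assembles to REP; MOVSL, which copies CX 4-byte words from (SI) to (DI).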
+
+ // (InvertFlags (CMPL a b)) == (CMPL b a)
+ // So if we want (SETL (CMPL a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPL b a))) instead.
+ // Rewrites will convert this to (SETG (CMPL b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+ // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary, but may clobber others.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), ax}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned <
+ {name: "FlagGT_ULT"}, // signed > and unsigned >
+
+ // Special ops for PIC floating-point constants.
+ // MOVSSconst1/MOVSDconst1 load the address of the constant-pool entry into a register.
+ // MOVSSconst2/MOVSDconst2 load the constant from that address.
+ // The const1 ops return a pointer, but we type it as uint32 because it can never point to the Go heap.
+ {name: "MOVSSconst1", reg: gp01, typ: "UInt32", aux: "Float32"},
+ {name: "MOVSDconst1", reg: gp01, typ: "UInt32", aux: "Float64"},
+ {name: "MOVSSconst2", argLength: 1, reg: gpfp, asm: "MOVSS"},
+ {name: "MOVSDconst2", argLength: 1, reg: gpfp, asm: "MOVSD"},
+ }
+
+ var _386blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "386",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../x86/ssa.go",
+ ops: _386ops,
+ blocks: _386blocks,
+ regnames: regNames386,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/386splitload.rules b/src/cmd/compile/internal/ssa/gen/386splitload.rules
new file mode 100644
index 0000000..ed93b90
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386splitload.rules
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See the top of AMD64splitload.rules for discussion of these rules.
+
+(CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x)
+
+(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
new file mode 100644
index 0000000..5de1e1e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -0,0 +1,2216 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
+(AddPtr ...) => (ADDQ ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+
+(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
+(SubPtr ...) => (SUBQ ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+
+(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+
+(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
+
+(Hmul(64|32) ...) => (HMUL(Q|L) ...)
+(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)
+
+(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
+(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
+(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+
+(Select0 (Add64carry x y c)) =>
+ (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Add64carry x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+(Select0 (Sub64borrow x y c)) =>
+ (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Sub64borrow x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
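+// The incoming carry/borrow c is a 0/1 value: (NEGLflags c) re-creates the CPU carry flag
+// from it (negating a non-zero value sets CF, negating zero clears it), ADCQ/SBBQ consume
+// that flag, and (NEGQ (SBBQcarrymask ...)) converts the resulting flag back into 0/1.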
+
+// Optimize ADCQ and friends
+(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
+(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
+(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
+(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
+(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
+(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
+(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
+(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
+(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
+(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
+
+
+(Mul64uhilo ...) => (MULQU2 ...)
+(Div128u ...) => (DIVQU2 ...)
+
+(Avg64u ...) => (AVGQU ...)
+
+(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
+(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
+(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+
+(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
+(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
+(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
+(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)
+
+(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)
+
+// Lowering other arithmetic
+(Ctz64 <t> x) => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+(Ctz32 x) => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
+(Ctz8 x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+
+(Ctz64NonZero x) => (Select0 (BSFQ x))
+(Ctz32NonZero ...) => (BSFL ...)
+(Ctz16NonZero ...) => (BSFL ...)
+(Ctz8NonZero ...) => (BSFL ...)
+
+// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
+// However, for zero-extended values, we can cheat a bit, and calculate
+// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
+// places the index of the highest set bit where we want it.
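+// E.g. for x = 5 (binary 101), 2*x+1 = 1011 and BSR returns 3 = BitLen(5);
+// for x = 0, BSR(1) returns 0 = BitLen(0).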
+(BitLen64 <t> x) => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+(BitLen32 x) => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+(BitLen16 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+(BitLen8 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
+
+(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
+
+(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
+(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
+(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))
+
+(Sqrt ...) => (SQRTSD ...)
+
+(RoundToEven x) => (ROUNDSD [0] x)
+(Floor x) => (ROUNDSD [1] x)
+(Ceil x) => (ROUNDSD [2] x)
+(Trunc x) => (ROUNDSD [3] x)
+
+(FMA x y z) => (VFMADD231SD z x y)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 ...) => (MOVBQSX ...)
+(SignExt8to32 ...) => (MOVBQSX ...)
+(SignExt8to64 ...) => (MOVBQSX ...)
+(SignExt16to32 ...) => (MOVWQSX ...)
+(SignExt16to64 ...) => (MOVWQSX ...)
+(SignExt32to64 ...) => (MOVLQSX ...)
+
+(ZeroExt8to16 ...) => (MOVBQZX ...)
+(ZeroExt8to32 ...) => (MOVBQZX ...)
+(ZeroExt8to64 ...) => (MOVBQZX ...)
+(ZeroExt16to32 ...) => (MOVWQZX ...)
+(ZeroExt16to64 ...) => (MOVWQZX ...)
+(ZeroExt32to64 ...) => (MOVLQZX ...)
+
+(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
+
+(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+(Cvt64to32F ...) => (CVTSQ2SS ...)
+(Cvt64to64F ...) => (CVTSQ2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
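+// E.g. (CMPQconst y [64]) sets the carry flag exactly when y < 64 (unsigned), so
+// (SBBQcarrymask ...) is all ones for in-range shifts and 0 otherwise, and the AND
+// either leaves the shifted value alone or clears it to 0.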
+(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+
+(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
+(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
+
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
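+// E.g. for a 64-bit shift, when y >= 64 the carry mask is 0, its NOT is all ones, and
+// ORing that into y makes the effective (mod-64) shift amount 63, so SARQ produces the
+// required 0 or -1; when y < 64 the OR leaves y unchanged.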
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
+(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
+(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
+(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
+
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
+
+// Lowering integer comparisons
+(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
+(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y))
+(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
+(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))
+
+// Lowering floating point comparisons
+// Note Go assembler gets UCOMISx operand order wrong, but it is right here
+// and the operands are reversed when generating assembly language.
+(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y))
+(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y))
+// Use SETGF/SETGEF with reversed operands to dodge NaN case.
+(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x))
+(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x))
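+// E.g. Less64F lowers to (SETGF (UCOMISD y x)); if either operand is NaN the comparison
+// is unordered, the "above" condition is false, and x < y correctly yields false.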
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of Store pattern should come first.
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 => (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
+(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem)
+(Move [16] dst src mem) && !config.useSSE =>
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+
+(Move [32] dst src mem) =>
+ (Move [16]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [48] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [64] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [32])
+ (OffPtr <src.Type> src [32])
+ (Move [32] dst src mem))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [9] dst src mem) =>
+ (MOVBstore [8] dst (MOVBload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [10] dst src mem) =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [12] dst src mem) =>
+ (MOVLstore [8] dst (MOVLload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
+ (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+
+// Adjust moves to be a multiple of 16 bytes.
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVOstore dst (MOVOload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem)))
+
+// Medium copying uses a duff device.
+(Move [s] dst src mem)
+ && s > 64 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [s] dst src mem)
+
+// Large copying uses REP MOVSQ.
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [24] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))
+(Zero [32] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,24)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))))
+
+(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+
+// Adjust zeros to be a multiple of 16 bytes.
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVOstore destptr (MOVOconst [0]) mem))
+
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+
+(Zero [16] destptr mem) && config.useSSE =>
+ (MOVOstore destptr (MOVOconst [0]) mem)
+(Zero [32] destptr mem) && config.useSSE =>
+ (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
+ (MOVOstore destptr (MOVOconst [0]) mem))
+(Zero [48] destptr mem) && config.useSSE =>
+ (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
+ (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
+ (MOVOstore destptr (MOVOconst [0]) mem)))
+(Zero [64] destptr mem) && config.useSSE =>
+ (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
+ (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
+ (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
+ (MOVOstore destptr (MOVOconst [0]) mem))))
+
+// Medium zeroing uses a duff device.
+(Zero [s] destptr mem)
+ && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [s] destptr (MOVOconst [0]) mem)
+
+// Large zeroing uses REP STOSQ.
+(Zero [s] destptr mem)
+ && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
+ && s%8 == 0 =>
+ (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const64 ...) => (MOVQconst ...)
+(Const32F ...) => (MOVSSconst ...)
+(Const64F ...) => (MOVSDconst ...)
+(ConstNil ) => (MOVQconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// Lowering conditional moves
+// If the condition is a SETxx, we can just run a CMOV from the comparison that was
+// setting the flags.
+// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
+ => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
+ => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
+ => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+
+// If the condition does not set the flags, we need to generate a comparison.
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
+ => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
+ => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
+ => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ => (CMOVLNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ => (CMOVWNE y x (CMPQconst [0] check))
+
+// Absorb InvertFlags
+(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+
+// Absorb constants generated during lower
+(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
+(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
+(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
+(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
+(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
+(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
+(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
+(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTQ p p))
+(IsInBounds idx len) => (SETB (CMPQ idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+
+(HasCPUFeature {s}) => (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s})))
+(Addr {sym} base) => (LEAQ {sym} base)
+(LocalAddr {sym} base _) => (LEAQ {sym} base)
+
+(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
+(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
+(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
+(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
+(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)
+
+// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
+// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
+(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
+(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)
+
+// Atomic memory updates.
+(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
+(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
+(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
+(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Unsigned comparisons to 0/1
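+// A value compared as unsigned is never below zero and always at least zero,
+// so these branch and SETcc results are known constants.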
+(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
+(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
+(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
+(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])
+
+// x & 1 != 0 -> x & 1
+(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
+(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
+
+// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
+// Note that BTx instructions use the carry bit, so we need to convert tests for the
+// zero flag into tests for the carry flag.
+// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
+// mutandis, for UGE and SETAE, and CC and SETCC.
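+// For illustration, Go source roughly of this shape (one representative form, not the
+// only one that matches) is what these rules turn into a BT:
+//	func isSet(a uint64, b uint) bool { return a&(1<<b) != 0 }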
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTLconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTQconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => (SET(B|AE) (BTQconst [int8(log64(c))] x))
+// SET..store variant
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+
+// Handle bit-testing in the form (a>>b)&1 != 0 by building on the above rules
+// and further combining shifts.
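+// e.g. (an illustrative source shape):
+//	func nthBit(a uint64, b uint) bool { return (a>>b)&1 != 0 }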
+(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
+(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
+(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
+(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
+(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
+(BTLconst [0] s:(SHRL x y)) => (BTL y x)
+
+// Rewrite a & 1 != 1 into a & 1 == 0.
+// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
+(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
+(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
+
+// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
+(ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem) =>
+ (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem) =>
+ (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+(XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem) =>
+ (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem) =>
+ (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+
+// Convert ORconst into BTS when the code gets smaller, with the boundary at 0x80
+// (ORL $0x40,AX is 3 bytes, ORL $0x80,AX is 6 bytes).
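+// e.g. x |= 0x100 is (ORLconst [0x100] x) and becomes (BTSLconst [8] x) under the
+// boundary above; x |= 0x40 stays an ORLconst because that is already 3 bytes.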
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Qconst [int8(log32(c))] x)
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Lconst [int8(log32(c))] x)
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
+ => (BT(S|C)Qconst [int8(log64(c))] x)
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Lconst [int8(log32(c))] x)
+
+// Recognize bit clearing: a &^= 1<<b
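+// e.g. (a worked instance of the constant-mask rules in this group):
+//	x &^= 0x100 // (ANDLconst [^0x100] x) => (BTRLconst [8] x)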
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
+(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRQconst [int8(log32(^c))] x)
+(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRLconst [int8(log32(^c))] x)
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+ => (BTRQconst [int8(log64(^c))] x)
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRLconst [int8(log32(^c))] x)
+(ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem) =>
+ (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem) =>
+ (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+
+// Special-case bit patterns on first/last bit.
+// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
+// for instance:
+// x & 0xFFFF0000 -> (x >> 16) << 16
+// x & 0x80000000 -> (x >> 31) << 31
+//
+// When the mask is just one bit (as in the second example above), this interferes
+// with the rules above that detect bit-testing / bit-clearing of the first/last bit.
+// We thus special-case those masks by detecting the shift patterns.
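+// For example, a test like
+//	x & (1 << 63) != 0
+// arrives here (after generic.rules) as a shift pair rather than an AND, which is
+// why the rules below match SHL/SHR combinations instead of masks.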
+
+// Special case resetting first/last bit
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
+ => (BTR(L|Q)const [0] x)
+(SHRLconst [1] (SHLLconst [1] x))
+ => (BTRLconst [31] x)
+(SHRQconst [1] (SHLQconst [1] x))
+ => (BTRQconst [63] x)
+
+// Special case testing first/last bit (with double-shift generated by generic.rules)
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
+
+// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
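+// e.g. (an illustrative source shape):
+//	func isNegative(x int64) bool { return uint64(x)>>63 != 0 }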
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+// Fold combinations of bit ops on the same bit. An example is math.Copysign(c,-1).
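+// math.Copysign(c, -1) first clears bit 63 of c's bits and then sets it, reaching
+// here as a BTSQconst [63] of a BTRQconst [63]; the first rule below reduces that
+// to a single BTSQconst [63].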
+(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
+(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
+(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+
+// Fold boolean negation into SETcc.
+(XORLconst [1] (SETNE x)) => (SETEQ x)
+(XORLconst [1] (SETEQ x)) => (SETNE x)
+(XORLconst [1] (SETL x)) => (SETGE x)
+(XORLconst [1] (SETGE x)) => (SETL x)
+(XORLconst [1] (SETLE x)) => (SETG x)
+(XORLconst [1] (SETG x)) => (SETLE x)
+(XORLconst [1] (SETB x)) => (SETAE x)
+(XORLconst [1] (SETAE x)) => (SETB x)
+(XORLconst [1] (SETBE x)) => (SETA x)
+(XORLconst [1] (SETA x)) => (SETBE x)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// Disabled because it interferes with the pattern match above and makes worse code.
+// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
+// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
+
+// fold constants into instructions
+(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x)
+(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+
+(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
+(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+
+(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+
+(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
+(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
+(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)
+
+(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
+(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
+(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+
+(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
+(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
+(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+(BTSLconst [c] (ORLconst [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
+(ORLconst [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
+(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c))) => (ANDQconst [d &^ (1<<uint32(c))] x)
+(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d))) => (ANDQconst [c &^ (1<<uint32(d))] x)
+(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+
+(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c)) => (XORQconst [d ^ 1<<uint32(c)] x)
+(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d)) => (XORQconst [c ^ 1<<uint32(d)] x)
+(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+
+(BTSQconst [c] (ORQconst [d] x)) && is32Bit(int64(d) | 1<<uint32(c)) => (ORQconst [d | 1<<uint32(c)] x)
+(ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d)) => (ORQconst [c | 1<<uint32(d)] x)
+(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
+
+(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
+(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+
+(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
+(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
+
+(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
+(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
+(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
+(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
+(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
+(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
+(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
+
+
+// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
+((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+
+((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+
+((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+
+((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+
+// Constant rotate instructions
+((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c])
+((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c])
+
+((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c])
+((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c])
+
+(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x)
+(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
+
+(RotateLeft8 ...) => (ROLB ...)
+(RotateLeft16 ...) => (ROLW ...)
+(RotateLeft32 ...) => (ROLL ...)
+(RotateLeft64 ...) => (ROLQ ...)
+
+// Non-constant rotates.
+// We want to issue a rotate when the Go source contains code like
+// y &= 63
+// x << y | x >> (64-y)
+// The shift rules above convert << to SHLx and >> to SHRx.
+// SHRx converts its shift argument from 64-y to -y.
+// A tricky situation occurs when y==0. Then the original code would be:
+// x << 0 | x >> 64
+// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
+// to force the second term to 0. We don't need that mask, but we must match
+// it in order to strip it out.
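+// A self-contained sketch of the source pattern described above:
+//	func rotl(x uint64, y uint) uint64 {
+//		y &= 63
+//		return x<<y | x>>(64-y)
+//	}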
+(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
+(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
+
+(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
+(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
+
+// Help with rotate detection
+(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT)
+(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT)
+
+(ORL (SHLL x (AND(Q|L)const y [15]))
+ (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
+ && v.Type.Size() == 2
+ => (ROLW x y)
+(ORL (SHRW x (AND(Q|L)const y [15]))
+ (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
+ && v.Type.Size() == 2
+ => (RORW x y)
+
+(ORL (SHLL x (AND(Q|L)const y [ 7]))
+ (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
+ && v.Type.Size() == 1
+ => (ROLB x y)
+(ORL (SHRB x (AND(Q|L)const y [ 7]))
+ (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
+ && v.Type.Size() == 1
+ => (RORB x y)
+
+// rotate left negative = rotate right
+(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
+(ROLL x (NEG(Q|L) y)) => (RORL x y)
+(ROLW x (NEG(Q|L) y)) => (RORW x y)
+(ROLB x (NEG(Q|L) y)) => (RORB x y)
+
+// rotate right negative = rotate left
+(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
+(RORL x (NEG(Q|L) y)) => (ROLL x y)
+(RORW x (NEG(Q|L) y)) => (ROLW x y)
+(RORB x (NEG(Q|L) y)) => (ROLB x y)
+
+// rotate by constants
+(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
+(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
+(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
+(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)
+
+(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
+(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
+(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
+(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)
+
+// Constant shift simplifications
+((SHLQ|SHRQ|SARQ)const x [0]) => x
+((SHLL|SHRL|SARL)const x [0]) => x
+((SHRW|SARW)const x [0]) => x
+((SHRB|SARB)const x [0]) => x
+((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24]))), but just in case.
+
+(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
+(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(Q|L|W|B) x y) && x.ID > y.ID => (InvertFlags (CMP(Q|L|W|B) y x))
+
+// Using MOVZX instead of AND is cheaper.
+(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
+(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
+// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
+// It is commented out for now; it also cannot trigger anyway, because the is32Bit guard on
+// the ANDQconst lowering rule above already prevents 0xFFFFFFFF from matching (for the same reason).
+// Using an alternate form of this rule segfaults some binaries because of
+// adverse interactions with other passes.
+// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)
+
+// strength reduction
+// Assumes the following costs (from https://gmplib.org/~tege/x86-timing.pdf):
+// 1 - addq, shlq, leaq, negq, subq
+// 3 - imulq
+// This limits the rewrites to two instructions.
+// Note that negq always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
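+// A worked instance of the cost model: 27*x lowers (per the rule below) to
+//	LEAQ2 x x           // 3*x,            cost 1
+//	LEAQ8 (3*x) (3*x)   // 3*x + 8*(3*x),  cost 1
+// for a total cost of 2, versus 3 for a single IMULQ.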
+(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [-1] x) => (NEG(Q|L) x)
+(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
+(MUL(Q|L)const [ 1] x) => x
+(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x)
+(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
+(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x)
+(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
+
+(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x)
+(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x))
+
+// combine add/shift into LEAQ/LEAL
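+// e.g. (illustrative source shapes; index arithmetic is the usual origin):
+//	x + 8*y // LEAQ8 x y
+//	x + 4*y // LEAQ4 x y
+//	x + 2*y // LEAQ2 x y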
+(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
+(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
+(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x)
+
+// combine ADDQ/ADDQconst into LEAQ1/LEAL1
+(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x)
+
+// fold ADDQ/ADDL into LEAQ/LEAL
+(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+
+// fold ADDQconst/ADDLconst into LEAQx/LEALx
+(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
+(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
+(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
+(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
+
+// fold shifts into LEAQx/LEALx
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
+(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
+(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
+(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
+(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
+(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
+(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
+(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
+(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
+(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+
+(MOVLQZX x) && zeroUpper32Bits(x,3) => x
+(MOVWQZX x) && zeroUpper48Bits(x,3) => x
+(MOVBQZX x) && zeroUpper56Bits(x,3) => x
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
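+// e.g. (a sketch of the idea):
+//	*p = x
+//	y := *p // no reload; y becomes a zero/sign extension of x (or x itself at full width)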
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
+(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
+(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
+(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
+
+// Fold extensions and ANDs together.
+(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
+(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
+
+// Don't extend before storing
+(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
+// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {sym} base val mem)
+
+// Fold constants into stores.
+(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
+ (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(Q|L|W|B)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+// fold LEAQs together
+(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAQ into LEAQ1
+(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ1 into LEAQ
+(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ into LEAQ[248]
+(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[248] into LEAQ
+(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
+ (LEAQ4 [off1+2*off2] {sym1} x y)
+(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
+ (LEAQ8 [off1+4*off2] {sym1} x y)
+// TODO: more?
+
+// Lower LEAQ2/4/8 when the index is a constant (folding it into the offset)
+(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
+ (LEAQ [off+int32(scale)*2] {sym} x)
+(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
+ (LEAQ [off+int32(scale)*4] {sym} x)
+(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
+ (LEAQ [off+int32(scale)*8] {sym} x)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// CMPQconst requires a 32-bit const, but we can still constant-fold 64-bit consts.
+// In theory this applies to any of the simplifications above,
+// but CMPQ is the only one I've actually seen occur.
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
+(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+
+// TESTQ c c sets flags like CMPQ c 0.
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
+(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)
+
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBQcarrymask (FlagEQ)) => (MOVQconst [0])
+(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
+(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no)
+((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes)
+((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
+((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
+((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
+((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
+((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
+((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
+((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
+((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
+
+// Absorb flag constants into SETxx ops.
+((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1])
+((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+// Remove redundant *const ops
+(ADDQconst [0] x) => x
+(ADDLconst [c] x) && c==0 => x
+(SUBQconst [0] x) => x
+(SUBLconst [c] x) && c==0 => x
+(ANDQconst [0] _) => (MOVQconst [0])
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDQconst [-1] x) => x
+(ANDLconst [c] x) && c==-1 => x
+(ORQconst [0] x) => x
+(ORLconst [c] x) && c==0 => x
+(ORQconst [-1] _) => (MOVQconst [-1])
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORQconst [0] x) => x
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
+
+// Remove redundant ops
+// Not in generic rules, because they may appear after lowering, e.g. Slicemask
+(NEG(Q|L) (NEG(Q|L) x)) => x
+(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
+
+// Convert constant subtracts to constant adds
+(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
+(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
+(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
+(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
+(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
+(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
+(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
+(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
+(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
+(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
+(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
+(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
+
+// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
+// but we can still constant-fold.
+// In theory this applies to any of the simplifications above,
+// but ORQ is the only one I've actually seen occur.
+(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
+
+// generic simplifications
+// TODO: more of this
+(ADDQ x (NEGQ y)) => (SUBQ x y)
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBQ x x) => (MOVQconst [0])
+(SUBL x x) => (MOVLconst [0])
+(ANDQ x x) => x
+(ANDL x x) => x
+(ORQ x x) => x
+(ORL x x) => x
+(XORQ x x) => (MOVQconst [0])
+(XORL x x) => (MOVLconst [0])
+
+(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
+(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
+
+// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
+(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
+(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
+
+// checking AND against 0.
+(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
+(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
+(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
+(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
+(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
+(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
+(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
+(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
+
+// Convert TESTx to TESTxconst if possible.
+(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
+(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
+(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
+(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMPQconst x [0]) => (TESTQ x x)
+(CMPLconst x [0]) => (TESTL x x)
+(CMPWconst x [0]) => (TESTW x x)
+(CMPBconst x [0]) => (TESTB x x)
+(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
+(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
+(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
+(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
+
+// Convert LEAQ1 back to ADDQ if we can
+(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
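+//
+// For illustration (not part of this change): the Go source shape these rules
+// target is what encoding/binary uses, e.g. binary.LittleEndian.Uint16(b) is
+// essentially
+//
+//	uint16(b[0]) | uint16(b[1])<<8
+//
+// i.e. two MOVBloads of adjacent bytes, one shifted left by 8 and OR'd in,
+// which the first rule below merges into a single MOVWload.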
+
+// Little-endian loads
+
+(OR(L|Q) x0:(MOVBload [i0] {s} p mem)
+ sh:(SHL(L|Q)const [8] x1:(MOVBload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+
+(OR(L|Q) x0:(MOVBload [i] {s} p0 mem)
+ sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+
+(OR(L|Q) x0:(MOVWload [i0] {s} p mem)
+ sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem)))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+
+(OR(L|Q) x0:(MOVWload [i] {s} p0 mem)
+ sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+
+(ORQ x0:(MOVLload [i0] {s} p mem)
+ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
+
+(ORQ x0:(MOVLload [i] {s} p0 mem)
+ sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
+
+(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
+ or:(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+
+(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
+ or:(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
+ y))
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+
+(ORQ
+ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
+ or:(ORQ
+ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
+ y))
+ && i1 == i0+2
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
+
+(ORQ
+ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem))
+ or:(ORQ
+ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem))
+ y))
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
+
+// Big-endian loads
+
+(OR(L|Q)
+ x1:(MOVBload [i1] {s} p mem)
+ sh:(SHL(L|Q)const [8] x0:(MOVBload [i0] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+
+(OR(L|Q)
+ x1:(MOVBload [i] {s} p1 mem)
+ sh:(SHL(L|Q)const [8] x0:(MOVBload [i] {s} p0 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+
+(OR(L|Q)
+ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
+ sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+
+(OR(L|Q)
+ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))
+ sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+
+(ORQ
+ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
+ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
+
+(ORQ
+ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem))
+ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
+
+(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
+ or:(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+
+(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
+ or:(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
+ y))
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+
+(ORQ
+ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
+ or:(ORQ
+ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
+ y))
+ && i1 == i0+2
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
+
+(ORQ
+ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))
+ or:(ORQ
+ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)))
+ y))
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
+
+// Combine 2 byte stores + shift into rolw 8 + word store
+(MOVBstore [i] {s} p w
+ x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
+ && x0.Uses == 1
+ && clobber(x0)
+ => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+(MOVBstore [i] {s} p1 w
+ x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
+ && x0.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x0)
+ => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+
+// Combine stores + shifts into bswap and larger (unaligned) stores
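+// For illustration (not part of this change): this is the store-side analogue
+// of the load merging above; binary.BigEndian.PutUint32(b, v) is essentially
+//
+//	b[0] = byte(v >> 24); b[1] = byte(v >> 16); b[2] = byte(v >> 8); b[3] = byte(v)
+//
+// four MOVBstores of shifted copies of v, which the first rule below rewrites
+// to a BSWAPL feeding a single MOVLstore.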
+(MOVBstore [i] {s} p w
+ x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
+ x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+(MOVBstore [i] {s} p3 w
+ x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w)
+ x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w)
+ x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && sequentialAddresses(p1, p2, 1)
+ && sequentialAddresses(p2, p3, 1)
+ && clobber(x0, x1, x2)
+ => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+
+(MOVBstore [i] {s} p w
+ x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
+ x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
+ x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
+ x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
+ x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
+ x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
+ x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+(MOVBstore [i] {s} p7 w
+ x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w)
+ x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w)
+ x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w)
+ x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w)
+ x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w)
+ x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w)
+ x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && sequentialAddresses(p1, p2, 1)
+ && sequentialAddresses(p2, p3, 1)
+ && sequentialAddresses(p3, p4, 1)
+ && sequentialAddresses(p4, p5, 1)
+ && sequentialAddresses(p5, p6, 1)
+ && sequentialAddresses(p6, p7, 1)
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+
+// Combine constant stores into larger (unaligned) stores.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
+ && config.useSSE
+ && x.Uses == 1
+ && c2.Off() + 8 == c.Off()
+ && c.Val() == 0
+ && c2.Val() == 0
+ && clobber(x)
+ => (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
+
+// Combine stores into larger (unaligned) stores. Little endian.
+(MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i] {s} p w mem)
+(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w0 mem)
+
+(MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w mem)
+(MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w0 mem)
+
+(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVQstore [i-4] {s} p w mem)
+(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVQstore [i-4] {s} p w0 mem)
+(MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && clobber(x)
+ => (MOVQstore [i] {s} p0 w mem)
+(MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && clobber(x)
+ => (MOVQstore [i] {s} p0 w0 mem)
+
+(MOVBstore [i] {s} p
+ x1:(MOVBload [j] {s2} p2 mem)
+ mem2:(MOVBstore [i-1] {s} p
+ x2:(MOVBload [j-1] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
+
+(MOVWstore [i] {s} p
+ x1:(MOVWload [j] {s2} p2 mem)
+ mem2:(MOVWstore [i-2] {s} p
+ x2:(MOVWload [j-2] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
+
+(MOVLstore [i] {s} p
+ x1:(MOVLload [j] {s2} p2 mem)
+ mem2:(MOVLstore [i-4] {s} p
+ x2:(MOVLload [j-4] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
+
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+
+// Merge load and op
+// TODO: add indexed variants?
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) <t> x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((BTC|BTR|BTS)Lmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) <t> x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((BTC|BTR|BTS)Qmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
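+
+// For illustration (not part of this change): a read-modify-write statement like
+//
+//	*p += x // p a *uint64
+//
+// first lowers to a MOVQload, an ADDQ, and a MOVQstore to the same address; the
+// rules above fold that sequence into a single memory-operand ADDQmodify
+// (ADDQ x, (p)).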
+
+// Merge ADDQconst and LEAQ into atomic loads.
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+
+// Merge ADDQconst and LEAQ into atomic stores.
+(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGQ [off1+off2] {sym} val ptr mem)
+(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGL [off1+off2] {sym} val ptr mem)
+(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+
+// Merge ADDQconst into atomic adds.
+// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
+(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDQlock [off1+off2] {sym} val ptr mem)
+(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDLlock [off1+off2] {sym} val ptr mem)
+
+// Merge ADDQconst into atomic compare and swaps.
+// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
+(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+
+// We don't need the conditional move if we know the arg of BSF is not zero.
+(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x
+// Extension is unnecessary for trailing zeros.
+(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
+(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
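+// These patterns typically come from the lowering of Ctz8/Ctz16
+// (math/bits.TrailingZeros8/16): OR-ing in a bit just above the operand width
+// guarantees BSF never sees a zero input, so any zero-extension of the operand
+// below that bit has no effect on the result and can be dropped.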
+
+// Redundant sign/zero extensions
+// Note: see issue 21963. We have to make sure we use the right type on
+// the resulting extension (the outer type, not the inner type).
+(MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
+(MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
+(MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
+
+(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) =>
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+
+// float <-> int register moves, with no conversion.
+// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
+(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
+(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
+(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val)
+(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val)
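+// For illustration (not part of this change): math.Float64bits is essentially
+//
+//	*(*uint64)(unsafe.Pointer(&f))
+//
+// which compiles to a MOVSDstore of f followed by a MOVQload from the same
+// stack slot; the rules above replace that store/load round trip with a direct
+// MOVQf2i register-to-register move.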
+
+// Other load-like ops.
+(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
+(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
+(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
+(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
+(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
+(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
+( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
+( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
+(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
+(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
+
+(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
+(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
+(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
+(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
+(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
+(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
+
+// Redirect stores to use the other register set.
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
+(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
+(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem)
+(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem)
+
+// Load args directly into the register class where they will be used.
+// We do this by just modifying the type of the Arg.
+(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+
+// LEAQ is rematerializeable, so this helps to avoid register spill.
+// See issue 22947 for details
+(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
+
+// HMULx is commutative, but its first argument must go in AX.
+// If possible, put a rematerializeable value in the first argument slot,
+// to reduce the odds that another value will have to be spilled
+// specifically to free up AX.
+(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
+(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
+
+// Fold loads into compares
+// Note: these may be undone by the flagalloc pass.
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(Q|L)constload {sym} [makeValAndOff32(c,off)] ptr mem)
+(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(W|B)constload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+
+(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
+ && l == l2
+ && l.Uses == 2
+ && validValAndOff(0, int64(off))
+ && clobber(l) =>
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
+ (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
+ (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
new file mode 100644
index 0000000..a87581b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -0,0 +1,946 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - All SymOff opcodes require their offset to fit in an int32.
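+//
+// Example for the AuxInt note above (illustrative, not part of this change):
+// an ANDLconst whose constant is 0x80000000 stores AuxInt = -2147483648, the
+// int32 value sign-extended to 64 bits, so a rewrite rule that wants the
+// unsigned 32-bit pattern must use uint32(c) rather than c.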
+
+// Suffixes encode the bit width of various instructions.
+// Q (quad word) = 64 bit
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../amd64/reg.go
+var regNamesAMD64 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+ "X8",
+ "X9",
+ "X10",
+ "X11",
+ "X12",
+ "X13",
+ "X14",
+ "X15",
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesAMD64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesAMD64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
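+	// For example (illustrative): buildReg("AX DX") yields a mask with bits 0
+	// and 2 set, since AX and DX sit at indices 0 and 2 of regNamesAMD64.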
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ callerSave = gp | fp
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gp2flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp1flags1flags = regInfo{inputs: []regMask{gp, 0}, outputs: []regMask{gp, 0}}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
+ gp21pax = regInfo{inputs: []regMask{gp &^ ax, gp}, outputs: []regMask{gp &^ ax}, clobbers: ax}
+
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstorexchg = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: []regMask{gp}}
+ cmpxchg = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
+ fp21loadidx = regInfo{inputs: []regMask{fp, gpspsb, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+ )
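+	// Note on the masks above (illustrative): a trailing 0 input mask marks an
+	// argument that needs no register, i.e. the memory (or flags) operand.
+	// gpstore, for instance, allows arg0 (the address) in any GP register, SP,
+	// or SB, arg1 (the value) in any GP register or SP, and arg2 is the memory
+	// argument.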
+
+ var AMD64ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
+
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ {name: "ADDSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "ADDSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ADDSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "ADDSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "SUBSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "SUBSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "SUBSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "MULSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "MULSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "MULSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "MULSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "DIVSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "DIVSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "DIVSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "DIVSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+
+ // binary ops
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+ {name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
+
+ // HMULx[U] are intentionally not marked as commutative, even though they are.
+ // This is because they have asymmetric register requirements.
+ // There are rewrite rules to try to place arguments in preferable slots.
+ {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
+
+ // For DIVQ, DIVL and DIVW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
+ {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
+ {name: "NEGLflags", argLength: 1, reg: gp11flags, typ: "(UInt32,Flags)", asm: "NEGL", resultInArg0: true}, // -arg0, flags set for 0-arg0.
+ // The following 4 add opcodes return the low 64 bits of the sum in the first result and
+ // the carry (the 65th bit) in the carry flag.
+ {name: "ADDQcarry", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDQ", commutative: true, resultInArg0: true}, // r = arg0+arg1
+ {name: "ADCQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", commutative: true, resultInArg0: true}, // r = arg0+arg1+carry(arg2)
+ {name: "ADDQconstcarry", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint
+ {name: "ADCQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint+carry(arg1)
+
+	// The following 4 subtract opcodes return the low 64 bits of the difference in the first result and
+ // the borrow (if the result is negative) in the carry flag.
+ {name: "SUBQborrow", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBQ", resultInArg0: true}, // r = arg0-arg1
+ {name: "SBBQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", resultInArg0: true}, // r = arg0-(arg1+carry(arg2))
+ {name: "SUBQconstborrow", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "SUBQ", aux: "Int32", resultInArg0: true}, // r = arg0-auxint
+ {name: "SBBQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", aux: "Int32", resultInArg0: true}, // r = arg0-(auxint+carry(arg1))
+
+ {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPQload", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPQconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+N*arg1+auxint+aux) to arg2 (in that order). arg3=mem.
+ {name: "CMPQloadidx8", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 8, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx4", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 4, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx2", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 2, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+
+ // compare *(arg0+N*arg1+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg2=mem.
+ {name: "CMPQconstloadidx8", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 8, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx4", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 4, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx2", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 2, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
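+ // Illustrative note (not part of the opcode table): the "(in that order)" wording matters because
+ // the flags are set as if computing (memory operand) - (other operand), so a rewrite along the lines
+ // of (CMPQ (MOVQload ptr mem) x) -> (CMPQload ptr x mem) keeps the branch condition unchanged,
+ // while the mirrored pattern has to wrap the result in InvertFlags.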
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
+
+ {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set
+ {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set
+ {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0
+ {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0
+ {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0
+ {name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0
+ {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0
+ {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
+ {name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
+ {name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
+ {name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 64
+ {name: "BTRLconst", argLength: 1, reg: gp11, asm: "BTRL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 64
+ {name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
+
+ // direct bit operation on memory operand
+ //
+ // Note that these operations do not mask the bit offset (arg1), and will write beyond their expected
+ // bounds if that argument is larger than 64/32 (for BT*Q and BT*L, respectively). If the compiler
+ // cannot prove that arg1 is in range, it must be explicitly masked (see e.g. the patterns that produce
+ // BT*modify from (MOVstore (BT* (MOVLload ptr mem) x) mem)).
+ {name: "BTCQmodify", argLength: 3, reg: gpstore, asm: "BTCQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTCLmodify", argLength: 3, reg: gpstore, asm: "BTCL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTSQmodify", argLength: 3, reg: gpstore, asm: "BTSQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTSLmodify", argLength: 3, reg: gpstore, asm: "BTSL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTRQmodify", argLength: 3, reg: gpstore, asm: "BTRQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTRLmodify", argLength: 3, reg: gpstore, asm: "BTRL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTCLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTSLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTRLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
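+ // Illustrative sketch of the masking concern above (hypothetical values): BTSQmodify with arg1 = 200
+ // would set a bit in the quadword at arg0+auxint+aux+24 (200/64 quadwords further on), i.e. memory the
+ // op was never meant to touch. The rules therefore only form BT*modify when the bit index is provably
+ // in range or has already been masked (for instance by an ANDQconst [63] / ANDLconst [31]).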
+
+ {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+ // Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of the shift amount!
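+ // For example (illustrative): SHRW by a variable amount of 17 really shifts the 16-bit value right by
+ // 17 (yielding 0) rather than by 17 mod 16, so the variable-shift lowerings for 8- and 16-bit types
+ // compare the shift amount against the type width instead of relying on hardware masking.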
+
+ {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned uint16(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned uint8(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint16(arg0) >> auxint, shift amount 0-15
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint8(arg0) >> auxint, shift amount 0-7
+
+ {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed int16(arg0) >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed int8(arg0) >> arg1, shift amount is mod 32
+ {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int16(arg0) >> auxint, shift amount 0-15
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int8(arg0) >> auxint, shift amount 0-7
+
+ {name: "ROLQ", argLength: 2, reg: gp21shift, asm: "ROLQ", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "RORQ", argLength: 2, reg: gp21shift, asm: "RORQ", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORL", argLength: 2, reg: gp21shift, asm: "RORL", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORW", argLength: 2, reg: gp21shift, asm: "RORW", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORB", argLength: 2, reg: gp21shift, asm: "RORB", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDQload", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBQload", argLength: 3, reg: gp21load, asm: "SUBQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDQload", argLength: 3, reg: gp21load, asm: "ANDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORQload", argLength: 3, reg: gp21load, asm: "ORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORQload", argLength: 3, reg: gp21load, asm: "XORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ {name: "ADDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ADDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ADDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ADDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBQloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "SUBQloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ANDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ANDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "XORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "XORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+
+ // direct binary-op on memory (read-modify-write)
+ {name: "ADDQmodify", argLength: 3, reg: gpstore, asm: "ADDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBQmodify", argLength: 3, reg: gpstore, asm: "SUBQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDQmodify", argLength: 3, reg: gpstore, asm: "ANDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORQmodify", argLength: 3, reg: gpstore, asm: "ORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORQmodify", argLength: 3, reg: gpstore, asm: "XORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+
+ {name: "ADDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "SUBQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "XORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "ADDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) ^= arg2, arg3=mem
+
+ {name: "ADDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+
+ // unary ops
+ {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true}, // -arg0
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true, clobberFlags: true}, // ^arg0
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true, clobberFlags: true}, // ^arg0
+
+ // BS{F,R}Q returns a tuple [result, flags]
+ // result is undefined if the input is zero.
+ // flags are set to "equal" if the input is zero, "not equal" otherwise.
+ // BS{F,R}L returns only the result.
+ {name: "BSFQ", argLength: 1, reg: gp11flags, asm: "BSFQ", typ: "(UInt64,Flags)"}, // # of low-order zeroes in 64-bit arg
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", typ: "UInt32", clobberFlags: true}, // # of low-order zeroes in 32-bit arg
+ {name: "BSRQ", argLength: 1, reg: gp11flags, asm: "BSRQ", typ: "(UInt64,Flags)"}, // index of highest set bit in 64-bit arg (63 - # of high-order zeroes)
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", typ: "UInt32", clobberFlags: true}, // index of highest set bit in 32-bit arg (31 - # of high-order zeroes)
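+ // Illustrative use (roughly how the rules consume the tuple): Ctz64 becomes
+ // CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x)), i.e. the flags half selects the
+ // constant 64 when the input was zero and the BSF result otherwise.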
+
+ // CMOV instructions: 64, 32 and 16-bit sizes.
+ // if arg2 encodes a true result, return arg1, else arg0
+ {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true},
+ {name: "CMOVQNE", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQLT", argLength: 3, reg: gp21, asm: "CMOVQLT", resultInArg0: true},
+ {name: "CMOVQGT", argLength: 3, reg: gp21, asm: "CMOVQGT", resultInArg0: true},
+ {name: "CMOVQLE", argLength: 3, reg: gp21, asm: "CMOVQLE", resultInArg0: true},
+ {name: "CMOVQGE", argLength: 3, reg: gp21, asm: "CMOVQGE", resultInArg0: true},
+ {name: "CMOVQLS", argLength: 3, reg: gp21, asm: "CMOVQLS", resultInArg0: true},
+ {name: "CMOVQHI", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQCC", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVQCS", argLength: 3, reg: gp21, asm: "CMOVQCS", resultInArg0: true},
+
+ {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true},
+ {name: "CMOVLNE", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLLT", argLength: 3, reg: gp21, asm: "CMOVLLT", resultInArg0: true},
+ {name: "CMOVLGT", argLength: 3, reg: gp21, asm: "CMOVLGT", resultInArg0: true},
+ {name: "CMOVLLE", argLength: 3, reg: gp21, asm: "CMOVLLE", resultInArg0: true},
+ {name: "CMOVLGE", argLength: 3, reg: gp21, asm: "CMOVLGE", resultInArg0: true},
+ {name: "CMOVLLS", argLength: 3, reg: gp21, asm: "CMOVLLS", resultInArg0: true},
+ {name: "CMOVLHI", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLCC", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVLCS", argLength: 3, reg: gp21, asm: "CMOVLCS", resultInArg0: true},
+
+ {name: "CMOVWEQ", argLength: 3, reg: gp21, asm: "CMOVWEQ", resultInArg0: true},
+ {name: "CMOVWNE", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWLT", argLength: 3, reg: gp21, asm: "CMOVWLT", resultInArg0: true},
+ {name: "CMOVWGT", argLength: 3, reg: gp21, asm: "CMOVWGT", resultInArg0: true},
+ {name: "CMOVWLE", argLength: 3, reg: gp21, asm: "CMOVWLE", resultInArg0: true},
+ {name: "CMOVWGE", argLength: 3, reg: gp21, asm: "CMOVWGE", resultInArg0: true},
+ {name: "CMOVWLS", argLength: 3, reg: gp21, asm: "CMOVWLS", resultInArg0: true},
+ {name: "CMOVWHI", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWCC", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+ {name: "CMOVWCS", argLength: 3, reg: gp21, asm: "CMOVWCS", resultInArg0: true},
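+ // Illustrative example (hypothetical values): CMOVQLT a b (CMPQ x y) yields b when x < y (signed)
+ // and a otherwise, matching the "return arg1 on true, arg0 on false" convention above.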
+
+ // CMOV with floating point instructions. We need separate pseudo-ops to handle
+ // InvertFlags correctly, and to generate special code that handles NaN (unordered flag).
+ // NOTE: the fact that CMOV*EQF here is marked to generate CMOV*NE is not a bug. See
+ // code generation in amd64/ssa.go.
+ {name: "CMOVQEQF", argLength: 3, reg: gp21pax, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQNEF", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQGTF", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQGEF", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVLEQF", argLength: 3, reg: gp21pax, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLNEF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLGTF", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLGEF", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVWEQF", argLength: 3, reg: gp21pax, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWNEF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWGTF", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWGEF", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
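+ // Illustrative note: with UCOMIS*, an unordered (NaN) comparison sets ZF, PF and CF, so a plain
+ // CMOV*EQ would treat NaN as "equal". The *EQF forms therefore reserve AX (gp21pax) as scratch so the
+ // code generator can combine the equality test with a parity check, which is also why they are listed
+ // with the CMOV*NE encodings (see the NOTE above).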
+
+ {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+
+ // POPCNT instructions aren't guaranteed to be on the target platform (they are SSE4).
+ // Any use must be preceded by a successful check of runtime.x86HasPOPCNT.
+ {name: "POPCNTQ", argLength: 1, reg: gp11, asm: "POPCNTQ", clobberFlags: true}, // count number of set bits in arg0
+ {name: "POPCNTL", argLength: 1, reg: gp11, asm: "POPCNTL", clobberFlags: true}, // count number of set bits in arg0
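+ // Sketch of how that check is arranged (intent only): the intrinsic expansion of math/bits.OnesCount*
+ // branches on runtime.x86HasPOPCNT and only reaches POPCNTQ/POPCNTL on the supported path.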
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+
+ // ROUNDSD instruction isn't guaranteed to be on the target platform (it is SSE4.1)
+ // Any use must be preceded by a successful check of runtime.x86HasSSE41.
+ {name: "ROUNDSD", argLength: 1, reg: fp11, aux: "Int8", asm: "ROUNDSD"}, // rounds arg0 depending on auxint, 1 means math.Floor, 2 Ceil, 3 Trunc
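+ // For reference (matching the comment above): auxint selects the SSE4.1 rounding mode, so math.Floor,
+ // math.Ceil and math.Trunc become ROUNDSD with auxint 1, 2 and 3 once the runtime.x86HasSSE41 check
+ // has passed; auxint 0 would round to nearest even.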
+
+ // VFMADD231SD only exists on platforms with the FMA3 instruction set.
+ // Any use must be preceded by a successful check of runtime.support_fma.
+ {name: "VFMADD231SD", argLength: 3, reg: fp31, resultInArg0: true, asm: "VFMADD231SD"},
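+ // Semantics sketch (assuming the usual 231 operand form): the result is arg1*arg2 + arg0, written back
+ // into arg0 (resultInArg0), which lets math.FMA(x, y, z) be expressed as VFMADD231SD z x y once the
+ // FMA check has succeeded.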
+
+ {name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Variants that store result to memory
+ {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (no NaN present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (NaN present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
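+ // Illustrative note: SETEQF/SETNEF need the extra AX register (flagsgpax) because no single SETcc
+ // expresses "equal and ordered" / "not equal or unordered"; the generated code is roughly
+ // SETEQ dst; SETPC AX; ANDL AX, dst (and an OR-based dual for SETNEF).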
+
+ {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int64
+ {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
+ {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int64
+ {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
+ {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSD2SQ", argLength: 1, reg: fpgp, asm: "CVTTSD2SQ"}, // convert float64 to int64
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTTSS2SQ", argLength: 1, reg: fpgp, asm: "CVTTSS2SQ"}, // convert float32 to int64
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSQ2SS", argLength: 1, reg: gpfp, asm: "CVTSQ2SS"}, // convert int64 to float32
+ {name: "CVTSQ2SD", argLength: 1, reg: gpfp, asm: "CVTSQ2SD"}, // convert int64 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ // Move values between int and float registers, with no conversion.
+ // TODO: should we have generic versions of these?
+ {name: "MOVQi2f", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits from int to float reg
+ {name: "MOVQf2i", argLength: 1, reg: fpgp, typ: "UInt64"}, // move 64 bits from float to int reg
+ {name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg
+ {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend
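+ // Illustrative use: these let the rules turn a float store that is immediately reloaded as an integer
+ // (the math.Float64bits pattern) into a plain register move, e.g.
+ // (MOVQload [off] ptr (MOVSDstore [off] ptr val _)) -> (MOVQf2i val).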
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAW", argLength: 1, reg: gp11sb, asm: "LEAW", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAQ1", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAW1", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAQ2", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAW2", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAQ4", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAW4", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAQ8", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAW8", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAx{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
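+ // Quick example of the addressing convention above (hypothetical symbol s): MOVQload [16] {s} ptr mem
+ // reads 8 bytes from ptr+16+offset(s), and MOVQstore with the same auxint/aux writes back to the very
+ // same address.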
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", scale: 1, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", scale: 2, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx8", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", scale: 2, aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 4, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 8, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", scale: 8, aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ // TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
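+	// For example (illustrative only): a MOVLstoreconst whose ValAndOff has Val()==7 and
+	// Off()==16 stores the 32-bit constant 7 to arg0+16+aux.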
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+ {name: "MOVQstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", scale: 2, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", scale: 4, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ...
+ {name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store 8 bytes of ... arg1 ...
+ {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store 8 bytes of ... 8*arg1 ...
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = value to store (will always be zero)
+ // arg2 = mem
+ // auxint = # of bytes to zero
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("X0")},
+ clobbers: buildReg("DI"),
+ },
+ faultOnNilArg0: true,
+	unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ },
+ {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true},
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 8-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = # of bytes to copy, must be multiple of 16
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 8-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // (InvertFlags (CMPQ a b)) == (CMPQ b a)
+ // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPQ b a))) instead.
+ // Rewrites will convert this to (SETG (CMPQ b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+	// I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+	// arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary, but may clobber others.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ {name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
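+	// (These are, for example, the checks emitted for a full slice expression s[a:b:c].)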
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned >
+ {name: "FlagGT_ULT"}, // signed > and unsigned <
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBatomicload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores and exchanges. Stores use XCHG to get the right memory ordering semantics.
+ // store arg0 to arg1+auxint+aux, arg2=mem.
+ // These ops return a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XCHGB", argLength: 3, reg: gpstorexchg, asm: "XCHGB", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Atomic adds.
+ // *(arg1+auxint+aux) += arg0. arg2=mem.
+ // Returns a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in AX, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CMPXCHGQ ...
+ // SETEQ AX
+ // CMPB AX, $0
+ // JNE ...
+ // instead of just
+ // CMPXCHGQ ...
+ // JEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
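+	// For example (an assumption, not stated in this file): Go-level CAS intrinsics such as
+	// sync/atomic.CompareAndSwapInt32/64 are lowered to these ops on amd64.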
+
+ // Atomic memory updates.
+ {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+ {name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+ }
+
+ var AMD64blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "AMD64",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../amd64/ssa.go",
+ ops: AMD64ops,
+ blocks: AMD64blocks,
+ regnames: regNamesAMD64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
new file mode 100644
index 0000000..a50d509
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules used by flagalloc and addressingmodes to
+// split a flag-generating merged load op into separate load and op.
+// Unlike with the other rules files, not all of these
+// rules will be applied to all values.
+// Rather, flagalloc requests that these rules be applied
+// to a particular problematic value.
+// These are often the exact inverse of rules in AMD64.rules,
+// only with the conditions removed.
+//
+// For addressingmodes, certain single instructions are slower than the two-instruction
+// split generated here (which is different from the inputs to addressingmodes).
+// For example:
+// (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+
+(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
+
+(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off32()] ptr mem) x)
+
+(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
+
+(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
+(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+
+(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
+
+(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
+(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
+
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
new file mode 100644
index 0000000..69989b0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -0,0 +1,1475 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+(Add32carry ...) => (ADDS ...)
+(Add32withcarry ...) => (ADC ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+(Sub32carry ...) => (SUBS ...)
+(Sub32withcarry ...) => (SBC ...)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Hmul(32|32u) ...) => (HMU(L|LU) ...)
+(Mul32uhilo ...) => (MULLU ...)
+
+(Div32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if one operand is negative
+ (Select0 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
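+// In the Div32 rule above, (SUB (XOR x (Signmask x)) (Signmask x)) is the branch-free
+// absolute value: Signmask x is 0 for x >= 0 and -1 for x < 0, giving x in the first case
+// and (x^-1) - (-1) = ^x + 1 = -x in the second.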
+(Div32u x y) => (Select0 <typ.UInt32> (CALLudiv x y))
+(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if x is negative
+ (Select1 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask x)) (Signmask x))
+(Mod32u x y) => (Select1 <typ.UInt32> (CALLudiv x y))
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(32|16|8) x) => (RSBconst [0] x)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) ...) => (MVN ...)
+
+(Sqrt ...) => (SQRTD ...)
+(Abs ...) => (ABSD ...)
+
+// TODO: optimize this for ARMv5 and ARMv6
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz16NonZero ...) => (Ctz32 ...)
+(Ctz8NonZero ...) => (Ctz32 ...)
+
+// count trailing zero for ARMv5 and ARMv6
+// 32 - CLZ(x&-x - 1)
+(Ctz32 <t> x) && objabi.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+(Ctz16 <t> x) && objabi.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+(Ctz8 <t> x) && objabi.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
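+// ORing in 0x10000 (resp. 0x100) plants a set bit just above the 16-bit (8-bit) value,
+// so Ctz16(0) = 16 and Ctz8(0) = 8 fall out of the same 32-bit computation.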
+
+// count trailing zero for ARMv7
+(Ctz32 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+
+// bit length
+(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
+
+// byte swap for ARMv5
+// let (a, b, c, d) be the bytes of x from high to low
+// t1 = x right rotate 16 bits -- (c, d, a, b )
+// t2 = x ^ t1 -- (a^c, b^d, a^c, b^d)
+// t3 = t2 &^ 0xff0000 -- (a^c, 0, a^c, b^d)
+// t4 = t3 >> 8 -- (0, a^c, 0, a^c)
+// t5 = x right rotate 8 bits -- (d, a, b, c )
+// result = t4 ^ t5 -- (d, c, b, a )
+// using shifted ops this can be done in 4 instructions.
+(Bswap32 <t> x) && objabi.GOARM==5 =>
+ (XOR <t>
+ (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
+ (SRRconst <t> x [8]))
+
+// byte swap for ARMv6 and above
+(Bswap32 x) && objabi.GOARM>=6 => (REV x)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// shifts
+// hardware instruction uses only the low byte of the shift
+// we compare to 256 to ensure Go semantics for large shifts
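+// For shift amounts in 32..255 the hardware shift already yields 0 (or all sign bits for SRA),
+// so the guard only needs to catch amounts >= 256, where the low byte would wrap around.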
+(Lsh32x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh16x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh8x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Rsh32Ux32 x y) => (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) => (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y))
+
+(Rsh16Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y))
+
+(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) => (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shifts: the result is just the sign bit, copied into every bit position
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// constants
+(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVWconst [0])
+(ConstBool [b]) => (MOVWconst [b2i32(b)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
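+// Slicemask: all ones when the length x is nonzero, 0 when x is 0
+// (0-x has its sign bit set exactly when x > 0, and slice lengths are never negative).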
+(Slicemask <t> x) => (SRAconst (RSBconst <t> [0] x) [31])
+
+// float <-> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt32Uto32F ...) => (MOVWUF ...)
+(Cvt32Uto64F ...) => (MOVWUD ...)
+(Cvt32Fto32 ...) => (MOVFW ...)
+(Cvt64Fto32 ...) => (MOVDW ...)
+(Cvt32Fto32U ...) => (MOVFWU ...)
+(Cvt64Fto32U ...) => (MOVDWU ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// fused-multiply-add
+(FMA x y z) => (FMULAD z x y)
+
+// comparisons
+(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq(32|64)F x y) => (Equal (CMP(F|D) x y))
+
+(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y))
+
+(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMP x y))
+(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMP x y))
+(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+
+// Medium zeroing uses a duff device
+// 4 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
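+// Each duffzero entry on ARM is a single 4-byte instruction that clears one 4-byte word,
+// and there are 128 of them, so the entry offset for s bytes is 4 * (128 - s/4).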
+
+// Large zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ (MOVWconst [0])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+
+// Medium move uses a duff device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (128 - s/4)] dst src mem)
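+// Each duffcopy entry is a load/store pair (8 bytes of code per 4-byte word copied),
+// hence the 8 * (128 - s/4) entry offset.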
+
+// Large move uses a loop
+(Move [s] {t} dst src mem)
+ && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+
+(If cond yes no) => (NE (CMPconst [0] cond) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPconst [0] (Equal cc)) yes no) => (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) => (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) => (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) => (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) => (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) => (UGE cc yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+(SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off2-off1] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBload [off1-off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBUload [off1-off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHload [off1-off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHUload [off1-off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVWload [off1-off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVFload [off1-off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVDload [off1-off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVBstore [off1-off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVHstore [off1-off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVWstore [off1-off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVFstore [off1-off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x)
+
+// fold constants into arithmetic ops
+(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(SUB (MOVWconst [c]) x) => (RSBconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(RSB (MOVWconst [c]) x) => (SUBconst [c] x)
+(RSB x (MOVWconst [c])) => (RSBconst [c] x)
+
+(ADDS x (MOVWconst [c])) => (ADDSconst [c] x)
+(SUBS x (MOVWconst [c])) => (SUBSconst [c] x)
+
+(ADC (MOVWconst [c]) x flags) => (ADCconst [c] x flags)
+(SBC (MOVWconst [c]) x flags) => (RSCconst [c] x flags)
+(SBC x (MOVWconst [c]) flags) => (SBCconst [c] x flags)
+
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(BIC x (MOVWconst [c])) => (BICconst [c] x)
+
+(SLL x (MOVWconst [c])) && 0 <= c && c < 32 => (SLLconst x [c])
+(SRL x (MOVWconst [c])) && 0 <= c && c < 32 => (SRLconst x [c])
+(SRA x (MOVWconst [c])) && 0 <= c && c < 32 => (SRAconst x [c])
+
+(CMP x (MOVWconst [c])) => (CMPconst [c] x)
+(CMP (MOVWconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMN x (MOVWconst [c])) => (CMNconst [c] x)
+(TST x (MOVWconst [c])) => (TSTconst [c] x)
+(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP x y) && x.ID > y.ID => (InvertFlags (CMP y x))
+
+// don't extend after a load that already produces the proper extension
+// The MOVWreg instruction is not emitted if the src and dst registers are the same, but it ensures the type.
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVWnop doesn't emit an instruction; it only ensures the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// mul by constant
+(MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x)
+(MUL _ (MOVWconst [0])) => (MOVWconst [0])
+(MUL x (MOVWconst [1])) => x
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c) => (SLLconst [int32(log32(c))] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c-1) && c >= 3 => (ADDshiftLL x x [int32(log32(c-1))])
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c+1) && c >= 7 => (RSBshiftLL x x [int32(log32(c+1))])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo32(c/3) => (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo32(c/5) => (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo32(c/7) => (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo32(c/9) => (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
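+// For example, x*10 matches the c%5 rule above: (ADDshiftLL x x [2]) is x + x<<2 = 5*x,
+// and the outer (SLLconst [1]) doubles it to 10*x.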
+
+(MULA x (MOVWconst [c]) a) && c == -1 => (SUB a x)
+(MULA _ (MOVWconst [0]) a) => a
+(MULA x (MOVWconst [1]) a) => (ADD x a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && c == -1 => (SUB a x)
+(MULA (MOVWconst [0]) _ a) => a
+(MULA (MOVWconst [1]) x a) => (ADD x a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS x (MOVWconst [c]) a) && c == -1 => (ADD a x)
+(MULS _ (MOVWconst [0]) a) => a
+(MULS x (MOVWconst [1]) a) => (RSB x a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS (MOVWconst [c]) x a) && c == -1 => (ADD a x)
+(MULS (MOVWconst [0]) _ a) => a
+(MULS (MOVWconst [1]) x a) => (RSB x a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+// div by constant
+(Select0 (CALLudiv x (MOVWconst [1]))) => x
+(Select1 (CALLudiv _ (MOVWconst [1]))) => (MOVWconst [0])
+(Select0 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (SRLconst [int32(log32(c))] x)
+(Select1 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (ANDconst [c-1] x)
+
+// constant comparisons
+(CMPconst (MOVWconst [x]) [y]) => (FlagConstant [subFlags32(x,y)])
+(CMNconst (MOVWconst [x]) [y]) => (FlagConstant [addFlags32(x,y)])
+(TSTconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x&y)])
+(TEQconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x^y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagConstant [subFlags32(0, 1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVWconst [b2i32(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagConstant [fc]) [c]) && fc.ule() => (MOVWconst [c])
+(CMOVWLSconst x (FlagConstant [fc]) [c]) && fc.ugt() => x
+
+(CMOVWHSconst _ (FlagConstant [fc]) [c]) && fc.uge() => (MOVWconst [c])
+(CMOVWHSconst x (FlagConstant [fc]) [c]) && fc.ult() => x
+
+(CMOVWLSconst x (InvertFlags flags) [c]) => (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) => (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagConstant [fc])) && fc.uge() => (SRAconst x [31])
+(SRAcond x y (FlagConstant [fc])) && fc.ult() => (SRA x y)
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORconst [c] _) && int32(c)==-1 => (MOVWconst [-1])
+(XORconst [0] x) => x
+(BICconst [0] x) => x
+(BICconst [c] _) && int32(c)==-1 => (MOVWconst [0])
+
+// generic constant folding
+(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (SUBconst [-c] x)
+(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
+(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(ADDconst [c] (RSBconst [d] x)) => (RSBconst [c+d] x)
+(ADCconst [c] (ADDconst [d] x) flags) => (ADCconst [c+d] x flags)
+(ADCconst [c] (SUBconst [d] x) flags) => (ADCconst [c-d] x flags)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SUBconst [c] (RSBconst [d] x)) => (RSBconst [-c+d] x)
+(SBCconst [c] (ADDconst [d] x) flags) => (SBCconst [c-d] x flags)
+(SBCconst [c] (SUBconst [d] x) flags) => (SBCconst [c+d] x flags)
+(RSBconst [c] (MOVWconst [d])) => (MOVWconst [c-d])
+(RSBconst [c] (RSBconst [d] x)) => (ADDconst [c-d] x)
+(RSBconst [c] (ADDconst [d] x)) => (RSBconst [c-d] x)
+(RSBconst [c] (SUBconst [d] x)) => (RSBconst [c+d] x)
+(RSCconst [c] (ADDconst [d] x) flags) => (RSCconst [c-d] x flags)
+(RSCconst [c] (SUBconst [d] x) flags) => (RSCconst [c+d] x flags)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint64(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint64(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint64(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(MULA (MOVWconst [c]) (MOVWconst [d]) a) => (ADDconst [c*d] a)
+(MULS (MOVWconst [c]) (MOVWconst [d]) a) => (SUBconst [c*d] a)
+(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c])
+(BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x)
+(MVN (MOVWconst [c])) => (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+// BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width)
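+// e.g. c = 8<<8|4 selects bits 4..11, so for d = 0x12345678 the extracted (signed) field is 0x67.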
+(BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+(BFXU [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) => (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) => (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) => (ADDshiftRA x y [c])
+(ADD x (SLL y z)) => (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) => (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) => (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) => (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) => (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) => (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) => (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) => (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) => (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) => (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) => (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) => (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) => (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) => (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) => (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) => (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) => (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) => (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) => (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) => (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) => (RSBshiftRA x y [c])
+(SUB x (SLL y z)) => (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) => (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) => (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) => (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) => (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) => (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) => (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) => (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) => (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) => (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) => (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) => (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) => (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) => (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) => (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) => (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) => (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) => (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) => (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) => (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) => (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) => (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) => (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) => (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) => (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) => (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) => (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) => (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) => (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) => (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) => (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) => (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) => (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) => (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) => (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) => (SUBshiftRA x y [c])
+(RSB x (SLL y z)) => (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) => (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) => (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) => (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) => (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) => (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) => (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) => (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) => (ANDshiftRA x y [c])
+(AND x (SLL y z)) => (ANDshiftLLreg x y z)
+(AND x (SRL y z)) => (ANDshiftRLreg x y z)
+(AND x (SRA y z)) => (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) => (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) => (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) => (ORshiftRA x y [c])
+(OR x (SLL y z)) => (ORshiftLLreg x y z)
+(OR x (SRL y z)) => (ORshiftRLreg x y z)
+(OR x (SRA y z)) => (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) => (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) => (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) => (XORshiftRA x y [c])
+(XOR x (SRRconst [c] y)) => (XORshiftRR x y [c])
+(XOR x (SLL y z)) => (XORshiftLLreg x y z)
+(XOR x (SRL y z)) => (XORshiftRLreg x y z)
+(XOR x (SRA y z)) => (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) => (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) => (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) => (BICshiftRA x y [c])
+(BIC x (SLL y z)) => (BICshiftLLreg x y z)
+(BIC x (SRL y z)) => (BICshiftRLreg x y z)
+(BIC x (SRA y z)) => (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) => (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) => (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) => (MVNshiftRA x [c])
+(MVN (SLL x y)) => (MVNshiftLLreg x y)
+(MVN (SRL x y)) => (MVNshiftRLreg x y)
+(MVN (SRA x y)) => (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) => (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) => (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) => (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) => (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) => (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) => (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) => (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) => (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) => (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) => (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) => (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) => (InvertFlags (CMPshiftRAreg x y z))
+(TST x (SLLconst [c] y)) => (TSTshiftLL x y [c])
+(TST x (SRLconst [c] y)) => (TSTshiftRL x y [c])
+(TST x (SRAconst [c] y)) => (TSTshiftRA x y [c])
+(TST x (SLL y z)) => (TSTshiftLLreg x y z)
+(TST x (SRL y z)) => (TSTshiftRLreg x y z)
+(TST x (SRA y z)) => (TSTshiftRAreg x y z)
+(TEQ x (SLLconst [c] y)) => (TEQshiftLL x y [c])
+(TEQ x (SRLconst [c] y)) => (TEQshiftRL x y [c])
+(TEQ x (SRAconst [c] y)) => (TEQshiftRA x y [c])
+(TEQ x (SLL y z)) => (TEQshiftLLreg x y z)
+(TEQ x (SRL y z)) => (TEQshiftRLreg x y z)
+(TEQ x (SRA y z)) => (TEQshiftRAreg x y z)
+(CMN x (SLLconst [c] y)) => (CMNshiftLL x y [c])
+(CMN x (SRLconst [c] y)) => (CMNshiftRL x y [c])
+(CMN x (SRAconst [c] y)) => (CMNshiftRA x y [c])
+(CMN x (SLL y z)) => (CMNshiftLLreg x y z)
+(CMN x (SRL y z)) => (CMNshiftRLreg x y z)
+(CMN x (SRA y z)) => (CMNshiftRAreg x y z)
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) => (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) => (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) => (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) => (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) => (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) => (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRR (MOVWconst [c]) x [d]) => (XORconst [c] (SRRconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(TSTshiftLL (MOVWconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVWconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVWconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TEQshiftLL (MOVWconst [c]) x [d]) => (TEQconst [c] (SLLconst <x.Type> x [d]))
+(TEQshiftRL (MOVWconst [c]) x [d]) => (TEQconst [c] (SRLconst <x.Type> x [d]))
+(TEQshiftRA (MOVWconst [c]) x [d]) => (TEQconst [c] (SRAconst <x.Type> x [d]))
+(CMNshiftLL (MOVWconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVWconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVWconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) => (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) => (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) => (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) => (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) => (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) => (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) => (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) => (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) => (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) => (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) => (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) => (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) => (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) => (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) => (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) => (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) => (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) => (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+(TSTshiftLLreg (MOVWconst [c]) x y) => (TSTconst [c] (SLL <x.Type> x y))
+(TSTshiftRLreg (MOVWconst [c]) x y) => (TSTconst [c] (SRL <x.Type> x y))
+(TSTshiftRAreg (MOVWconst [c]) x y) => (TSTconst [c] (SRA <x.Type> x y))
+(TEQshiftLLreg (MOVWconst [c]) x y) => (TEQconst [c] (SLL <x.Type> x y))
+(TEQshiftRLreg (MOVWconst [c]) x y) => (TEQconst [c] (SRL <x.Type> x y))
+(TEQshiftRAreg (MOVWconst [c]) x y) => (TEQconst [c] (SRA <x.Type> x y))
+(CMNshiftLLreg (MOVWconst [c]) x y) => (CMNconst [c] (SLL <x.Type> x y))
+(CMNshiftRLreg (MOVWconst [c]) x y) => (CMNconst [c] (SRL <x.Type> x y))
+(CMNshiftRAreg (MOVWconst [c]) x y) => (CMNconst [c] (SRA <x.Type> x y))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) => (ADDconst x [c<<uint64(d)])
+(ADDshiftRL x (MOVWconst [c]) [d]) => (ADDconst x [int32(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) => (ADCconst x [c<<uint64(d)] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) => (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) => (ADCconst x [c>>uint64(d)] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) => (ADDSconst x [c<<uint64(d)])
+(ADDSshiftRL x (MOVWconst [c]) [d]) => (ADDSconst x [int32(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) => (ADDSconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVWconst [c]) [d]) => (SUBconst x [c<<uint64(d)])
+(SUBshiftRL x (MOVWconst [c]) [d]) => (SUBconst x [int32(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) => (SBCconst x [c<<uint64(d)] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) => (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) => (SBCconst x [c>>uint64(d)] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) => (SUBSconst x [c<<uint64(d)])
+(SUBSshiftRL x (MOVWconst [c]) [d]) => (SUBSconst x [int32(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) => (SUBSconst x [c>>uint64(d)])
+(RSBshiftLL x (MOVWconst [c]) [d]) => (RSBconst x [c<<uint64(d)])
+(RSBshiftRL x (MOVWconst [c]) [d]) => (RSBconst x [int32(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) => (RSBconst x [c>>uint64(d)])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) => (RSCconst x [c<<uint64(d)] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) => (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) => (RSCconst x [c>>uint64(d)] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) => (RSBSconst x [c<<uint64(d)])
+(RSBSshiftRL x (MOVWconst [c]) [d]) => (RSBSconst x [int32(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) => (RSBSconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVWconst [c]) [d]) => (ANDconst x [c<<uint64(d)])
+(ANDshiftRL x (MOVWconst [c]) [d]) => (ANDconst x [int32(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVWconst [c]) [d]) => (ORconst x [c<<uint64(d)])
+(ORshiftRL x (MOVWconst [c]) [d]) => (ORconst x [int32(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVWconst [c]) [d]) => (XORconst x [c<<uint64(d)])
+(XORshiftRL x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRR x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+(BICshiftLL x (MOVWconst [c]) [d]) => (BICconst x [c<<uint64(d)])
+(BICshiftRL x (MOVWconst [c]) [d]) => (BICconst x [int32(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)])
+(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) => (MOVWconst [^int32(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst [^(c>>uint64(d))])
+(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c<<uint64(d)])
+(CMPshiftRL x (MOVWconst [c]) [d]) => (CMPconst x [int32(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVWconst [c]) [d]) => (TSTconst x [c<<uint64(d)])
+(TSTshiftRL x (MOVWconst [c]) [d]) => (TSTconst x [int32(uint32(c)>>uint64(d))])
+(TSTshiftRA x (MOVWconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TEQshiftLL x (MOVWconst [c]) [d]) => (TEQconst x [c<<uint64(d)])
+(TEQshiftRL x (MOVWconst [c]) [d]) => (TEQconst x [int32(uint32(c)>>uint64(d))])
+(TEQshiftRA x (MOVWconst [c]) [d]) => (TEQconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVWconst [c]) [d]) => (CMNconst x [c<<uint64(d)])
+(CMNshiftRL x (MOVWconst [c]) [d]) => (CMNconst x [int32(uint32(c)>>uint64(d))])
+(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+
+(ADDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRA x y [c])
+(TSTshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftLL x y [c])
+(TSTshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRL x y [c])
+(TSTshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRA x y [c])
+(TEQshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftLL x y [c])
+(TEQshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRL x y [c])
+(TEQshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRA x y [c])
+(CMNshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftLL x y [c])
+(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c])
+(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c])
+
+// Generate rotates
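+// (x<<c) | (x>>(32-c)) is a rotation; ARM only has rotate-right, so it becomes SRRconst [32-c] x.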
+(ADDshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+( ORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+(XORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+(ADDshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+( ORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+(XORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+
+(RotateLeft32 x (MOVWconst [c])) => (SRRconst [-c&31] x)
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft32 x y) => (SRR x (RSBconst [0] <y.Type> y))
+
+// ((x>>8) | (x<<8)) -> (REV16 x), where x has type uint16; the "|" may also be "^" or "+".
+// The UBFX instruction is available on ARMv6T2, ARMv7 and later, while REV16 is available from ARMv6 on.
+// So for ARMv6 we need to match SLLconst, SRLconst and ORshiftLL rather than BFXU.
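+// BFXU [armBFAuxInt(8, 8)] x extracts the 8-bit field starting at bit 8, i.e. x>>8 for a uint16 x.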
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 => (REV16 x)
+
+// use indexed loads and stores
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBloadidx ptr idx mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVBstoreidx ptr idx val mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVHstoreidx ptr idx val mem)
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) => (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) => (MOVWload [c] ptr mem)
+(MOVBloadidx ptr (MOVWconst [c]) mem) => (MOVBload [c] ptr mem)
+(MOVBloadidx (MOVWconst [c]) ptr mem) => (MOVBload [c] ptr mem)
+(MOVBUloadidx ptr (MOVWconst [c]) mem) => (MOVBUload [c] ptr mem)
+(MOVBUloadidx (MOVWconst [c]) ptr mem) => (MOVBUload [c] ptr mem)
+(MOVHUloadidx ptr (MOVWconst [c]) mem) => (MOVHUload [c] ptr mem)
+(MOVHUloadidx (MOVWconst [c]) ptr mem) => (MOVHUload [c] ptr mem)
+(MOVHloadidx ptr (MOVWconst [c]) mem) => (MOVHload [c] ptr mem)
+(MOVHloadidx (MOVWconst [c]) ptr mem) => (MOVHload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) => (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) => (MOVWstore [c] ptr val mem)
+(MOVBstoreidx ptr (MOVWconst [c]) val mem) => (MOVBstore [c] ptr val mem)
+(MOVBstoreidx (MOVWconst [c]) ptr val mem) => (MOVBstore [c] ptr val mem)
+(MOVHstoreidx ptr (MOVWconst [c]) val mem) => (MOVHstore [c] ptr val mem)
+(MOVHstoreidx (MOVWconst [c]) ptr val mem) => (MOVHstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) => (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem)
+
+// generic simplifications
+(ADD x (RSBconst [0] y)) => (SUB x y)
+(ADD <t> (RSBconst [c] x) (RSBconst [d] y)) => (RSBconst [c+d] (ADD <t> x y))
+(SUB x x) => (MOVWconst [0])
+(RSB x x) => (MOVWconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+(BIC x x) => (MOVWconst [0])
+
+(ADD (MUL x y) a) => (MULA x y a)
+(SUB a (MUL x y)) && objabi.GOARM == 7 => (MULS x y a)
+(RSB (MUL x y) a) && objabi.GOARM == 7 => (MULS x y a)
+
+(NEGF (MULF x y)) && objabi.GOARM >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && objabi.GOARM >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && objabi.GOARM >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && objabi.GOARM >= 6 => (NMULD x y)
+(NMULF (NEGF x) y) => (MULF x y)
+(NMULD (NEGD x) y) => (MULD x y)
+
+// the result will overwrite the addend, since they are in the same register
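+// (hence the a.Uses==1 condition: any other use of a would observe the clobbered value)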
+(ADDF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y)
+
+(AND x (MVN y)) => (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) => (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) => (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) => (BICshiftRA x y [c])
+
+// floating point optimizations
+(CMPF x (MOVFconst [0])) => (CMPF0 x)
+(CMPD x (MOVDconst [0])) => (CMPD0 x)
+
+// bit extraction
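+// (x<<c)>>d with d>=c keeps the field of width 32-d that starts at bit d-c of x.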
+(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+
+// comparison simplification
+((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved
+((LT|LE|EQ|NE|GE|GT) (CMN x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMP x y)) // sense of carry bit not preserved
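+// (the unsigned conditions are omitted above because they read the carry bit, which the rewrite changes)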
+(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no)
+(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (NE (CMP x y) yes no)
+(NE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (NE (CMPconst [c] x) yes no)
+(NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (EQ (CMN x y) yes no)
+(EQ (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (EQ (CMNconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (NE (CMN x y) yes no)
+(NE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (NE (CMNconst [c] x) yes no)
+(NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (EQ (TST x y) yes no)
+(EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (EQ (TSTconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (NE (TST x y) yes no)
+(NE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (NE (TSTconst [c] x) yes no)
+(NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (EQ (TEQ x y) yes no)
+(EQ (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (EQ (TEQconst [c] x) yes no)
+(EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (NE (TEQ x y) yes no)
+(NE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (NE (TEQconst [c] x) yes no)
+(NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LTnoov (CMP x y) yes no)
+(LT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMPconst [c] x) yes no)
+(LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LEnoov (CMP x y) yes no)
+(LE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMPconst [c] x) yes no)
+(LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LTnoov (CMN x y) yes no)
+(LT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMNconst [c] x) yes no)
+(LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LEnoov (CMN x y) yes no)
+(LE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMNconst [c] x) yes no)
+(LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LTnoov (TST x y) yes no)
+(LT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TSTconst [c] x) yes no)
+(LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LEnoov (TST x y) yes no)
+(LE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TSTconst [c] x) yes no)
+(LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LTnoov (TEQ x y) yes no)
+(LT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TEQconst [c] x) yes no)
+(LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LEnoov (TEQ x y) yes no)
+(LE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TEQconst [c] x) yes no)
+(LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GTnoov (CMP x y) yes no)
+(GT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMPconst [c] x) yes no)
+(GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GEnoov (CMP x y) yes no)
+(GE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMPconst [c] x) yes no)
+(GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GTnoov (CMN x y) yes no)
+(GT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMNconst [c] x) yes no)
+(GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GEnoov (CMN x y) yes no)
+(GE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMNconst [c] x) yes no)
+(GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GTnoov (TST x y) yes no)
+(GT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TSTconst [c] x) yes no)
+(GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GEnoov (TST x y) yes no)
+(GE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TSTconst [c] x) yes no)
+(GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GTnoov (TEQ x y) yes no)
+(GT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TEQconst [c] x) yes no)
+(GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GEnoov (TEQ x y) yes no)
+(GE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TEQconst [c] x) yes no)
+(GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRAreg x y z) yes no)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
new file mode 100644
index 0000000..80b4005
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -0,0 +1,2789 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(32F|64F) ...) => (FADD(S|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(32F|64F) ...) => (FSUB(S|D) ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul(32|16|8) ...) => (MULW ...)
+(Mul(32F|64F) ...) => (FMUL(S|D) ...)
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (UMULH ...)
+(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
+(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIV x y)
+(Div64u ...) => (UDIV ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (UDIVW ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Mod64 x y) => (MOD x y)
+(Mod64u ...) => (UMOD ...)
+(Mod32 x y) => (MODW x y)
+(Mod32u ...) => (UMODW ...)
+(Mod16 x y) => (MODW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (MODW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
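+// since x>=y the subtraction cannot wrap, so (x-y)/2 + y equals (x+y)/2 without overflowing x+y.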
+(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(32F|64F) ...) => (FNEG(S|D) ...)
+(Com(64|32|16|8) ...) => (MVN ...)
+
+// math package intrinsics
+(Abs ...) => (FABSD ...)
+(Sqrt ...) => (FSQRTD ...)
+(Ceil ...) => (FRINTPD ...)
+(Floor ...) => (FRINTMD ...)
+(Round ...) => (FRINTAD ...)
+(RoundToEven ...) => (FRINTND ...)
+(Trunc ...) => (FRINTZD ...)
+(FMA x y z) => (FMADDD z x y)
+
+// lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
+(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
+
+(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)
+
+(Ctz64 <t> x) => (CLZ (RBIT <t> x))
+(Ctz32 <t> x) => (CLZW (RBITW <t> x))
+(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+
+(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
+
+// Load args directly into the register class where they will be used.
+(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+
+// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
+(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
+(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
+(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
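+// For example, math.Float64bits is essentially
+//   func Float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
+// and without these rules the value would be stored to the stack and reloaded instead of
+// moving directly between the FP and GP register files.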
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
+(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
+(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+
+(Bswap64 ...) => (REV ...)
+(Bswap32 ...) => (REVW ...)
+
+(BitRev64 ...) => (RBIT ...)
+(BitRev32 ...) => (RBITW ...)
+(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
+(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))
+
+// UMOD is normally assembled as a UREM pseudo-instruction, which in turn is expanded into
+// UDIV and MSUB instructions. If an identical UDIV already appears just before or after
+// the UREM (as in quo, rem := z/y, z%y), that second UDIV is redundant. Rewriting UMOD
+// explicitly as MSUB(UDIV) lets the CSE pass remove the extra UDIV.
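+// For instance, the Go code
+//   quo, rem := z/y, z%y
+// then needs only one UDIV plus one MSUB once CSE merges the two divisions.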
+(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
+
+// 64-bit addition with carry.
+(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+
+// 64-bit subtraction with borrowing.
+(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XOR (MOVDconst [1]) x)
+
+// shifts
+// the hardware instruction uses only the low 6 bits of the shift amount,
+// so we compare against 64 to preserve Go semantics for large shifts.
+// The rotate rules for non-constant shifts are based on the rules below;
+// if the rules below change, please also update the rotate rules derived from them.
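+// As a concrete example of the Go semantics being preserved: with var x uint64 = 1 and
+// var s uint = 64, Go defines x << s == 0, while the raw SLL instruction would yield 1
+// (it uses only s&63), so out-of-range shift amounts must select 0 (or 63 for the
+// sign-filling right shifts).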
+(Lsh64x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh64x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh32x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh16x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh8x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh64Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh32Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh16Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh8Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64x64 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh64x32 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh64x16 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh64x8 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh32x64 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh32x32 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh32x16 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh32x8 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh16x64 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh16x32 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh16x16 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh8x64 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh8x32 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh8x16 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh8x8 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32F|64F) [val]) => (FMOV(S|D)const [float64(val)])
+(ConstNil) => (MOVDconst [0])
+(ConstBool [b]) => (MOVDconst [b2i(b)])
+
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (SCVTFWS ...)
+(Cvt32to64F ...) => (SCVTFWD ...)
+(Cvt64to32F ...) => (SCVTFS ...)
+(Cvt64to64F ...) => (SCVTFD ...)
+(Cvt32Uto32F ...) => (UCVTFWS ...)
+(Cvt32Uto64F ...) => (UCVTFWD ...)
+(Cvt64Uto32F ...) => (UCVTFS ...)
+(Cvt64Uto64F ...) => (UCVTFD ...)
+(Cvt32Fto32 ...) => (FCVTZSSW ...)
+(Cvt64Fto32 ...) => (FCVTZSDW ...)
+(Cvt32Fto64 ...) => (FCVTZSS ...)
+(Cvt64Fto64 ...) => (FCVTZSD ...)
+(Cvt32Fto32U ...) => (FCVTZUSW ...)
+(Cvt64Fto32U ...) => (FCVTZUDW ...)
+(Cvt32Fto64U ...) => (FCVTZUS ...)
+(Cvt64Fto64U ...) => (FCVTZUD ...)
+(Cvt32Fto64F ...) => (FCVTSD ...)
+(Cvt64Fto32F ...) => (FCVTDS ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (LoweredRound32F ...)
+(Round64F ...) => (LoweredRound64F ...)
+
+// comparisons
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPS x y))
+(Eq64F x y) => (Equal (FCMPD x y))
+
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPS x y))
+(Neq64F x y) => (NotEqual (FCMPD x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+
+// Set condition flags for floating-point comparisons "x < y"
+// and "x <= y". If either or both operands are NaN, all three of
+// (x < y), (x == y) and (x > y) are false, and the ARM manual
+// specifies that in this case FCMP sets PSTATE.<N,Z,C,V> to
+// (0, 0, 1, 1).
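+// For example, in Go, with z := math.NaN(), each of z < 1, z == 1, and z > 1 is false;
+// the (0, 0, 1, 1) flag setting lets LessThanF and LessEqualF report false for this
+// unordered case as well.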
+(Less32F x y) => (LessThanF (FCMPS x y))
+(Less64F x y) => (LessThanF (FCMPD x y))
+
+// For an unsigned integer x, the following identities are useful when combined with a branch:
+// 0 < x => x != 0
+// x <= 0 => x == 0
+// x < 1 => x == 0
+// 1 <= x => x != 0
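+// For example, for an unsigned x, the Go condition 0 < x is exactly x != 0; rewriting it
+// this way turns the comparison into a plain test against zero, which the block rules
+// below can lower to a single CBZ/CBNZ-style branch.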
+(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
+(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
+(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
+(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
+
+(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMPW x y))
+(Less64U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+
+// Refer to the comments for op Less64F above.
+(Leq32F x y) => (LessEqualF (FCMPS x y))
+(Leq64F x y) => (LessEqualF (FCMPD x y))
+
+(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMPW x y))
+(Leq64U x y) => (LessEqualU (CMP x y))
+
+// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
+(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
+(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
+(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
+(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
+
+// CSEL needs a flag-generating argument. Synthesize a CMPW if necessary.
+(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
+(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [off] ptr)
+
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [5] ptr mem) =>
+ (MOVBstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [6] ptr mem) =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [7] ptr mem) =>
+ (MOVBstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [9] ptr mem) =>
+ (MOVBstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [10] ptr mem) =>
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [11] ptr mem) =>
+ (MOVBstore [10] ptr (MOVDconst [0])
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [12] ptr mem) =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [13] ptr mem) =>
+ (MOVBstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [14] ptr mem) =>
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [15] ptr mem) =>
+ (MOVBstore [14] ptr (MOVDconst [0])
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+(Zero [16] ptr mem) =>
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+
+(Zero [32] ptr mem) =>
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+
+(Zero [48] ptr mem) =>
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+
+(Zero [64] ptr mem) =>
+ (STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+
+// strip off fractional word zeroing
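+// e.g. a 23-byte Zero becomes a 16-byte Zero followed by an overlapping 8-byte Zero at
+// offset 15 (23%16 = 7 <= 8), and a 27-byte Zero becomes a 16-byte Zero followed by an
+// overlapping 16-byte Zero at offset 11 (27%16 = 11 > 8).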
+(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
+ (Zero [8]
+ (OffPtr <ptr.Type> ptr [s-8])
+ (Zero [s-s%16] ptr mem))
+(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
+ (Zero [16]
+ (OffPtr <ptr.Type> ptr [s-16])
+ (Zero [s-s%16] ptr mem))
+
+// medium zeroing uses a duff device
+// 4, 16, and 64 are magic constants, see runtime/mkduff.go
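+// e.g. Zero [128] becomes DUFFZERO [4*(64-128/16)] = DUFFZERO [224]: each 16-byte zeroing
+// block in duffzero occupies 4 bytes of code, so the offset skips 64-8 blocks and the
+// remaining 8 blocks clear the 128 bytes.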
+(Zero [s] ptr mem)
+ && s%16 == 0 && s > 64 && s <= 16*64
+ && !config.noDuffDevice =>
+ (DUFFZERO [4 * (64 - s/16)] ptr mem)
+
+// large zeroing uses a loop
+(Zero [s] ptr mem)
+ && s%16 == 0 && (s > 16*64 || config.noDuffDevice) =>
+ (LoweredZero
+ ptr
+ (ADDconst <ptr.Type> [s-16] ptr)
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBUload [6] src mem)
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem)))
+(Move [12] dst src mem) =>
+ (MOVWstore [8] dst (MOVWUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+
+// strip off fractional word move
+(Move [s] dst src mem) && s%8 != 0 && s > 8 =>
+ (Move [s%8]
+ (OffPtr <dst.Type> dst [s-s%8])
+ (OffPtr <src.Type> src [s-s%8])
+ (Move [s-s%8] dst src mem))
+
+// medium move uses a duff device
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 8
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
+ (DUFFCOPY <types.TypeMem> [8*(64-(s-8)/16)] dst src mem))
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+// 8 is the number of bytes to encode:
+//
+// LDP.P 16(R16), (R26, R27)
+// STP.P (R26, R27), 16(R17)
+//
+// 64 is the number of these blocks. See runtime/duff_arm64.s:duffcopy
+
+// large move uses a loop
+(Move [s] dst src mem)
+ && s > 24 && s%8 == 0 && logLargeCopy(v, s) =>
+ (LoweredMove
+ dst
+ src
+ (ADDconst <src.Type> src [s-8])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(If (LessThanF cc) yes no) => (FLT cc yes no)
+(If (LessEqualF cc) yes no) => (FLE cc yes no)
+(If (GreaterThanF cc) yes no) => (FGT cc yes no)
+(If (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NZ cond yes no)
+
+// atomic intrinsics
+// Note: these ops do not accept an offset.
+(AtomicLoad8 ...) => (LDARB ...)
+(AtomicLoad32 ...) => (LDARW ...)
+(AtomicLoad64 ...) => (LDAR ...)
+(AtomicLoadPtr ...) => (LDAR ...)
+
+(AtomicStore8 ...) => (STLRB ...)
+(AtomicStore32 ...) => (STLRW ...)
+(AtomicStore64 ...) => (STLR ...)
+(AtomicStorePtrNoWB ...) => (STLR ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+
+(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
+(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
+(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
+
+// Currently the updated value is not used, but we need a register to temporarily hold it.
+(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
+(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
+(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
+(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
+
+(AtomicAnd8Variant ptr val mem) => (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+(AtomicAnd32Variant ptr val mem) => (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+(AtomicOr8Variant ptr val mem) => (Select1 (LoweredAtomicOr8Variant ptr val mem))
+(AtomicOr32Variant ptr val mem) => (Select1 (LoweredAtomicOr32Variant ptr val mem))
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NZ (Equal cc) yes no) => (EQ cc yes no)
+(NZ (NotEqual cc) yes no) => (NE cc yes no)
+(NZ (LessThan cc) yes no) => (LT cc yes no)
+(NZ (LessThanU cc) yes no) => (ULT cc yes no)
+(NZ (LessEqual cc) yes no) => (LE cc yes no)
+(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
+(NZ (GreaterThan cc) yes no) => (GT cc yes no)
+(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
+(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
+(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(NZ (LessThanF cc) yes no) => (FLT cc yes no)
+(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
+(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
+(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TST x y) yes no)
+(NE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TST x y) yes no)
+(LT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TST x y) yes no)
+(LE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TST x y) yes no)
+(GT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TST x y) yes no)
+(GE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TST x y) yes no)
+
+(EQ (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TSTW x y) yes no)
+(NE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TSTW x y) yes no)
+(LT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TSTW x y) yes no)
+(LE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TSTW x y) yes no)
+(GT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TSTW x y) yes no)
+(GE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TSTW x y) yes no)
+
+(EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTconst [c] y) yes no)
+(NE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTconst [c] y) yes no)
+(LT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTconst [c] y) yes no)
+(LE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTconst [c] y) yes no)
+(GT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTconst [c] y) yes no)
+(GE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTconst [c] y) yes no)
+
+(EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNconst [c] y) yes no)
+(NE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNconst [c] y) yes no)
+(LT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNconst [c] y) yes no)
+(LE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNconst [c] y) yes no)
+(GT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNconst [c] y) yes no)
+(GE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNconst [c] y) yes no)
+
+(EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+(LT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMN x y) yes no)
+(LE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMN x y) yes no)
+(GT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMN x y) yes no)
+(GE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMN x y) yes no)
+
+(EQ (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
+(LT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMNW x y) yes no)
+(LE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMNW x y) yes no)
+(GT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMNW x y) yes no)
+(GE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMNW x y) yes no)
+
+(EQ (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+(LT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMN x y) yes no)
+(LE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMN x y) yes no)
+(GT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMN x y) yes no)
+(GE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMN x y) yes no)
+
+(EQ (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
+(LT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMNW x y) yes no)
+(LE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMNW x y) yes no)
+(GT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMNW x y) yes no)
+(GE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMNW x y) yes no)
+
+(EQ (CMPconst [0] x) yes no) => (Z x yes no)
+(NE (CMPconst [0] x) yes no) => (NZ x yes no)
+(EQ (CMPWconst [0] x) yes no) => (ZW x yes no)
+(NE (CMPWconst [0] x) yes no) => (NZW x yes no)
+
+(EQ (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (NE (CMNW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (NE (CMPW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+
+// Absorb bit-tests into block
+(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+
+// Test sign-bit for signed comparisons against zero
+(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
+(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
+(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
+(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
+ (MOVDaddr [int32(off1)+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+int32(off2)] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed load
+(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
+(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
+(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
+(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)
+(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+
+// shifted register indexed load
+(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
+(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
+(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
+(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
+(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
+(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
+(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
+(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
+(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
+(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed store
+(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
+(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
+(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
+(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
+(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
+(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
+(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
+(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
+(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
+(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
+(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
+(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)
+
+// shifted register indexed store
+(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
+(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
+(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) => (MOVQstorezero [off] {sym} ptr mem)
+
+// register indexed store zero
+(MOVDstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
+(MOVBstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
+(MOVDstoreidx ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx ptr idx mem)
+(MOVWstoreidx ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
+(MOVHstoreidx ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
+(MOVBstoreidx ptr idx (MOVDconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
+(MOVDstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDstorezero [int32(c)] ptr mem)
+(MOVDstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVDstorezero [int32(c)] idx mem)
+(MOVWstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
+(MOVWstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
+(MOVHstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
+(MOVHstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
+(MOVBstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
+(MOVBstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)
+
+// shifted register indexed store zero
+(MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx ptr (SLLconst [3] idx) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx ptr (SLLconst [2] idx) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx ptr (SLLconst [1] idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx ptr (ADD idx idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx (SLLconst [3] idx) ptr mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx (SLLconst [2] idx) ptr mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx (SLLconst [1] idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx (ADD idx idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDstorezero [int32(c<<3)] ptr mem)
+(MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWstorezero [int32(c<<2)] ptr mem)
+(MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHstorezero [int32(c<<1)] ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+// these seem to interact badly with other rules, resulting in slower code
+//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
+//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
+//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
+//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
+//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
+//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
+//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+
+(MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+
+(MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVDnop doesn't emit an instruction; it exists only to ensure the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// fold constants into arithmetic ops
+(ADD x (MOVDconst [c])) => (ADDconst [c] x)
+(SUB x (MOVDconst [c])) => (SUBconst [c] x)
+(AND x (MOVDconst [c])) => (ANDconst [c] x)
+(OR x (MOVDconst [c])) => (ORconst [c] x)
+(XOR x (MOVDconst [c])) => (XORconst [c] x)
+(TST x (MOVDconst [c])) => (TSTconst [c] x)
+(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
+(CMN x (MOVDconst [c])) => (CMNconst [c] x)
+(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
+(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
+(EON x (MOVDconst [c])) => (XORconst [^c] x)
+(ORN x (MOVDconst [c])) => (ORconst [^c] x)
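+// BIC, EON and ORN compute AND, XOR and OR with the second operand inverted,
+// so a constant operand folds in with its bits complemented.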
+
+(SLL x (MOVDconst [c])) => (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
+(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
+
+(CMP x (MOVDconst [c])) => (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
+(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
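+// Swapping the operands of a comparison mirrors the condition; InvertFlags
+// records the swap so later rules can flip the condition code to match.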
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x))
+
+// mul-neg => mneg
+(NEG (MUL x y)) => (MNEG x y)
+(NEG (MULW x y)) => (MNEGW x y)
+(MUL (NEG x) y) => (MNEG x y)
+(MULW (NEG x) y) => (MNEGW x y)
+
+// madd/msub
+(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+
+(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
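+// MADD a x y computes a+x*y and MSUB a x y computes a-x*y, so a single-use
+// multiply (or negated multiply) feeding an add/sub fuses into one instruction.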
+
+// optimize ADCSflags, SBCSflags and friends
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
+
+// mul by constant
+(MUL x (MOVDconst [-1])) => (NEG x)
+(MUL _ (MOVDconst [0])) => (MOVDconst [0])
+(MUL x (MOVDconst [1])) => x
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
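+// For example, a multiply by 7 matches the isPowerOfTwo64(c+1) rule above:
+// 7*x == (x<<3) - x, emitted as (ADDshiftLL (NEG <x.Type> x) x [3]).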
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
+(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 => x
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+// mneg by constant
+(MNEG x (MOVDconst [-1])) => x
+(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
+(MNEG x (MOVDconst [1])) => (NEG x)
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
+(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+(MADD a x (MOVDconst [-1])) => (SUB a x)
+(MADD a _ (MOVDconst [0])) => a
+(MADD a x (MOVDconst [1])) => (ADD a x)
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADD a (MOVDconst [-1]) x) => (SUB a x)
+(MADD a (MOVDconst [0]) _) => a
+(MADD a (MOVDconst [1]) x) => (ADD a x)
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a x (MOVDconst [-1])) => (ADD a x)
+(MSUB a _ (MOVDconst [0])) => a
+(MSUB a x (MOVDconst [1])) => (SUB a x)
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a (MOVDconst [-1]) x) => (ADD a x)
+(MSUB a (MOVDconst [0]) _) => a
+(MSUB a (MOVDconst [1]) x) => (SUB a x)
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+// div by constant
+(UDIV x (MOVDconst [1])) => x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
+(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
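+// For example, for unsigned x the rules above rewrite x/8 as (SRLconst [3] x)
+// and x%8 as (ANDconst [7] x).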
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVDconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVDconst [0])
+(BIC x x) => (MOVDconst [0])
+(EON x x) => (MOVDconst [-1])
+(ORN x x) => (MOVDconst [-1])
+(AND x (MVN y)) => (BIC x y)
+(XOR x (MVN y)) => (EON x y)
+(OR x (MVN y)) => (ORN x y)
+(MVN (XOR x y)) => (EON x y)
+(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
+(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
+(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
+(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
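+// The two SUB rewrites above use the identities
+// x - (y - z) == (x + z) - y and (x - y) - z == x - (y + z).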
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (MVN x)
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
+(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
+(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
+(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW <x.Type> x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
+(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
+(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
+(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(MVN (MOVDconst [c])) => (MOVDconst [^c])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVDreg (MOVDconst [c])) => (MOVDconst [c])
+
+// constant comparisons
+(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
+(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
+(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
+(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
+(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
+(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+(Z (MOVDconst [0]) yes no) => (First yes no)
+(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
+(NZ (MOVDconst [0]) yes no) => (First no yes)
+(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
+(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
+(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
+(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb InvertFlags into CSEL(0)
+(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
+(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+(LessThanF (InvertFlags x)) => (GreaterThanF x)
+(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
+(GreaterThanF (InvertFlags x)) => (LessThanF x)
+(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
+
+// Boolean-generating instructions always
+// zero the upper bits of the register; no need to zero-extend
+(MOVBUreg x) && x.Type.IsBoolean() => (MOVDreg x)
+
+// absorb flag constants into conditional instructions
+(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
+(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+
+// absorb flags back into boolean CSEL
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL [boolval.Op] x y flagArg(boolval))
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL0 [boolval.Op] x flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+
+// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
+(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
+(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+
+// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+
+// Generate rotates with const shift
+(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
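+// For example, x<<10 | x>>54 matches the ORshiftLL rule above and becomes
+// (RORconst [54] x), a right rotate by 54 (equivalently, a left rotate by 10).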
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+
+(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)
+
+// Generate rotates with non-const shift.
+// These rules match the Go source code like
+// y &= 63
+// x << y | x >> (64-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROL instruction, ROL(x, y) is replaced by ROR(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x y)
+
+// These rules match the Go source code like
+// y &= 31
+// x << y | x >> (32-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROLW instruction, ROLW(x, y) is replaced by RORW(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x y)
+
+// ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16 and "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
+
+// Extract from reg pair
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+
+// Rewrite special pairs of shifts to AND.
+// On ARM64 the bitmask can fit into an instruction.
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
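+// For example, (SRLconst [56] (SLLconst [56] x)) keeps only the low byte of x
+// and becomes (ANDconst [0xff] x).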
+
+// Special case for setting bits to 1. An example is math.Copysign(c,-1).
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
+
+// bitfield ops
+
+// sbfiz
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
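+// For example, (SRAconst [8] (SLLconst [16] x)) is the low 48 bits of x,
+// sign-extended and shifted left by 8, i.e. (SBFIZ [armBFAuxInt(8, 48)] x).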
+
+// sbfx
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
+
+// sbfiz/sbfx combinations: merge shifts into bitfield ops
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// ubfiz
+// (x & ac) << sc
+(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
+(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
+(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
+// (x << sc) & ac
+(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+
+// ubfx
+// (x >> sc) & ac
+(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
+(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
+(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
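+// For example, (ANDconst [0xff] (SRLconst [8] x)), i.e. (x>>8)&0xff, extracts
+// bits 8..15 of x and becomes (UBFX [armBFAuxInt(8, 8)] x).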
+// (x & ac) >> sc
+(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+
+// ubfiz/ubfx combinations: merge shifts into bitfield ops
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) >> c2) >> c3
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) << c2) >> c3
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// bfi
+(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ => (BFI [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
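+// The bfi rules above match an OR of one value with a bitfield cleared and
+// another value's low bits shifted into that same field, which is exactly a
+// bitfield insert (BFI).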
+// bfxil
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ => (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+ => (BFXIL [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
+ => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+
+// do combined loads
+// little endian loads
+// b[0] | b[1]<<8 => load 16-bit
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx2 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx4 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+
+// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 32-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+
+// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 64-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
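+// For example, hand-written byte assembly such as
+//
+//	func load32(b []byte) uint32 {
+//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+//	}
+//
+// (roughly the pattern behind encoding/binary.LittleEndian.Uint32) is collapsed
+// by the rules above into a single unaligned 32-bit load, and the eight-byte
+// form into a single 64-bit load.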
+
+// big endian loads
+// b[1] | b[0]<<8 => load 16-bit, reverse
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit, reverse
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit, reverse
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [i4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
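+// As with the little-endian rules, these fire on byte assembly written in
+// big-endian order, e.g.
+//
+//	func load32be(b []byte) uint32 {
+//		return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+//	}
+//
+// (roughly encoding/binary.BigEndian.Uint32), which becomes one 32-bit load
+// followed by a REVW byte reverse rather than four loads and three ORs.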
+
+// Combine zero stores into larger (unaligned) stores.
+(MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),1)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstorezeroidx ptr1 idx1 mem)
+(MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstorezeroidx ptr idx mem)
+(MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),2)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 idx1 mem)
+(MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+(MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),4)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 idx1 mem)
+(MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+(MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),8)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
+(MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
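+// For example, adjacent explicit zeroing such as
+//
+//	b[0] = 0
+//	b[1] = 0
+//
+// can be merged by the rules above into a single 16-bit zero store, and two
+// adjacent 8-byte zero stores can merge into one 16-byte zero store
+// (MOVQstorezero).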
+
+// Combine stores into larger (unaligned) stores.
+(MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w0 mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w0 mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w mem)
+(MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstoreidx ptr idx w mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w0 mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w0 mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w)
+ x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w)
+ x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w)
+ x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w)
+ x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+(MOVBstore [7] {s} p w
+ x0:(MOVBstore [6] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [5] {s} p (SRLconst [16] w)
+ x2:(MOVBstore [4] {s} p (SRLconst [24] w)
+ x3:(MOVBstore [3] {s} p (SRLconst [32] w)
+ x4:(MOVBstore [2] {s} p (SRLconst [40] w)
+ x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w)
+ x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [3] idx) w
+ x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr idx w
+ x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
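+// For example, byte-by-byte stores such as
+//
+//	func put32(b []byte, v uint32) {
+//		b[0] = byte(v)
+//		b[1] = byte(v >> 8)
+//		b[2] = byte(v >> 16)
+//		b[3] = byte(v >> 24)
+//	}
+//
+// (roughly encoding/binary.LittleEndian.PutUint32) can be merged by the rules
+// above into a single unaligned 32-bit store; big-endian store order is
+// handled by the REV/REVW/REV16W variants, which byte-reverse once and store.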
+
+// FP simplification
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+(FADDS a (FMULS x y)) => (FMADDS a x y)
+(FADDD a (FMULD x y)) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) => (FNMADDD a x y)
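+// For example, the FADDD/FMULD rule above lets a function like
+//
+//	func fmadd(a, x, y float64) float64 { return a + x*y }
+//
+// compile to a single FMADDD, and an explicit negation of a product folds
+// into FNMULD instead of emitting a separate FNEGD.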
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
new file mode 100644
index 0000000..4d1d14e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -0,0 +1,762 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands it into multiple instructions and uses the
+//   tmp register (R27).
+
+// Suffixes encode the bit width of various instructions.
+// D (double word) = 64 bit
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand-coding regmasks.
+var regNamesARM64 = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18", // platform register, not used
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ // R27 = REGTMP not used in regalloc
+ "g", // aka R28
+ "R29", // frame pointer, not used
+ "R30", // aka REGLINK
+ "SP", // aka R31
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp0flags1 = regInfo{inputs: []regMask{0}, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21nog = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gpg, gpg, gpg}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ fpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCS", commutative: true}, // arg0+arg1+carry, set flags.
+ {name: "ADCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "ADC"}, // ZR+ZR+carry
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "ADDSconstflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDS", aux: "Int64"}, // arg0+auxint, set flags.
+ {name: "ADDSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDS", commutative: true}, // arg0+arg1, set flags.
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64"}, // arg0 - auxInt
+ {name: "SBCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBCS"}, // arg0-(arg1+borrowing), set flags.
+ {name: "SUBSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBS"}, // arg0 - arg1, set flags.
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true}, // arg0 * arg1, 32-bit
+ {name: "MNEG", argLength: 2, reg: gp21, asm: "MNEG", commutative: true}, // -arg0 * arg1
+ {name: "MNEGW", argLength: 2, reg: gp21, asm: "MNEGW", commutative: true}, // -arg0 * arg1, 32-bit
+ {name: "MULH", argLength: 2, reg: gp21, asm: "SMULH", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "UMULH", argLength: 2, reg: gp21, asm: "UMULH", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit
+ {name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
+ {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
+		{name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"},                             // arg0 / arg1, unsigned
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
+		{name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"},                           // arg0 / arg1, unsigned, 32 bit
+ {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
+ {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
+ {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
+ {name: "UMODW", argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit
+
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1
+ {name: "FNMULS", argLength: 2, reg: fp21, asm: "FNMULS", commutative: true}, // -(arg0 * arg1)
+ {name: "FNMULD", argLength: 2, reg: fp21, asm: "FNMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int64"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "EON", argLength: 2, reg: gp21, asm: "EON"}, // arg0 ^ ^arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0 | ^arg1
+
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGSflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "NEGS"}, // -arg0, set flags.
+ {name: "NGCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "NGC"}, // -1 if borrowing, 0 otherwise.
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit
+ {name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit
+ {name: "REV16W", argLength: 1, reg: gp11, asm: "REV16W"}, // byte reverse in each 16-bit halfword, 32-bit
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // bit reverse, 64-bit
+ {name: "RBITW", argLength: 1, reg: gp11, asm: "RBITW"}, // bit reverse, 32-bit
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero, 64-bit
+ {name: "CLZW", argLength: 1, reg: gp11, asm: "CLZW"}, // count leading zero, 32-bit
+ {name: "VCNT", argLength: 1, reg: fp11, asm: "VCNT"}, // count set bits for each 8-bit unit and store the result in each 8-bit unit
+ {name: "VUADDLV", argLength: 1, reg: fp11, asm: "VUADDLV"}, // unsigned sum of eight bytes in a 64-bit value, zero extended to 64-bit.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // 3-operand, the addend comes first
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // +arg0 + (arg1 * arg2)
+ {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD"}, // +arg0 + (arg1 * arg2)
+ {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS"}, // -arg0 - (arg1 * arg2)
+ {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD"}, // -arg0 - (arg1 * arg2)
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // +arg0 - (arg1 * arg2)
+ {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD"}, // +arg0 - (arg1 * arg2)
+ {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS"}, // -arg0 + (arg1 * arg2)
+ {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD"}, // -arg0 + (arg1 * arg2)
+ {name: "MADD", argLength: 3, reg: gp31, asm: "MADD"}, // +arg0 + (arg1 * arg2)
+ {name: "MADDW", argLength: 3, reg: gp31, asm: "MADDW"}, // +arg0 + (arg1 * arg2), 32-bit
+ {name: "MSUB", argLength: 3, reg: gp31, asm: "MSUB"}, // +arg0 - (arg1 * arg2)
+ {name: "MSUBW", argLength: 3, reg: gp31, asm: "MSUBW"}, // +arg0 - (arg1 * arg2), 32-bit
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed
+ {name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits
+ {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits
+ {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits
+ {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits
+ {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt
+ {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit
+ {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int64", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TSTW", argLength: 2, reg: gp2flags, asm: "TSTW", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0, 32 bit
+ {name: "TSTWconst", argLength: 1, reg: gp1flags, asm: "TSTW", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0, 32 bit
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+ {name: "FCMPS0", argLength: 1, reg: fp1flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "FCMPD0", argLength: 1, reg: fp1flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // shifted ops
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift
+ {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt)
+ {name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift
+ {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<<auxInt)
+ {name: "EONshiftRL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), unsigned shift
+ {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift
+ {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<<auxInt)
+ {name: "ORNshiftRL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), unsigned shift
+ {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift
+
+ // bitfield ops
+ // for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
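+ // For example, a field with lsb=8 and width=16 is encoded as
+ // auxInt = 8<<8 | 16 = 0x0810, so lsb = 0x0810>>8 = 8 and
+ // width = 0x0810&0xff = 16.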
+ // insert low width bits of arg1 into the result starting at bit lsb, copy other bits from arg0
+ {name: "BFI", argLength: 2, reg: gp21nog, asm: "BFI", aux: "ARM64BitField", resultInArg0: true},
+ // extract width bits of arg1 starting at bit lsb and insert at low end of result, copy other bits from arg0
+ {name: "BFXIL", argLength: 2, reg: gp21nog, asm: "BFXIL", aux: "ARM64BitField", resultInArg0: true},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left of the inserted bit field are set to the high/sign bit of the inserted bit field, bits to the right are zeroed
+ {name: "SBFIZ", argLength: 1, reg: gp11, asm: "SBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are set to the high/sign bit of the extracted bitfield
+ {name: "SBFX", argLength: 1, reg: gp11, asm: "SBFX", aux: "ARM64BitField"},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left and right of the inserted bit field are zeroed
+ {name: "UBFIZ", argLength: 1, reg: gp11, asm: "UBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are zeroed
+ {name: "UBFX", argLength: 1, reg: gp11, asm: "UBFX", aux: "ARM64BitField"},
+
+ // moves
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "UInt64", rematerializeable: true}, // 64 bits from auxint
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed load
+ {name: "MOVDloadidx", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit dword from arg0 + arg1, arg2 = mem.
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load 8-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load 8-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "FMOVSloadidx", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1, arg2=mem.
+ {name: "FMOVDloadidx", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1, arg2=mem.
+
+ // shifted register indexed load
+ {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "STP", argLength: 4, reg: gpstore2, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of arg1 and arg2 to arg0 + auxInt + aux. arg3=mem.
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ // register indexed store
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstore2, asm: "MOVB", typ: "Mem"}, // store 1 byte of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1, arg3=mem.
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1, arg3=mem.
+
+ // shifted register indexed store
+ {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem.
+ {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem.
+ {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVQstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed store zero
+ {name: "MOVBstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVHstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVWstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVDstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1, arg2 = mem.
+
+ // shifted register indexed store zero
+ {name: "MOVHstorezeroidx2", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1*2, arg2 = mem.
+ {name: "MOVWstorezeroidx4", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1*4, arg2 = mem.
+ {name: "MOVDstorezeroidx8", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1*8, arg2 = mem.
+
+ {name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion)
+ {name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion)
+ {name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32 bits from int to float reg (no conversion)
+ {name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32 bits from float to int reg, zero extend (no conversion)
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, zero-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, zero-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, zero-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32
+ {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64
+ {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"}, // uint32 -> float32
+ {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64
+ {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32
+ {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64
+ {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32
+ {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64
+ {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32
+ {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32
+ {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32
+ {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32
+ {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64
+ {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64
+ {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64
+ {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32
+
+ // floating-point round to integral
+ {name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"},
+ {name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"},
+ {name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"},
+ {name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"},
+ {name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"},
+
+ // conditional instructions; auxint is
+ // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.)
+ {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1
+ {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true if flags encode x==y, false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true if flags encode x!=y, false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true if flags encode signed x<y, false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true if flags encode signed x<=y, false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true if flags encode signed x>y, false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true if flags encode signed x>=y, false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x<y, false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x<=y, false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x>y, false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x>=y, false otherwise.
+ {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x<y, false otherwise.
+ {name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x<=y, false otherwise.
+ {name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x>y, false otherwise.
+ {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x>=y, false otherwise.
+ {name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x>=y || x is unordered with y, false otherwise.
+ {name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x>y || x is unordered with y, false otherwise.
+ {name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x<=y || x is unordered with y, false otherwise.
+ {name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true if flags encode floating-point x<y || x is unordered with y, false otherwise.
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R20 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R30"),
+ },
+ faultOnNilArg0: true,
+ unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ },
+
+ // large zeroing
+ // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg1 = address of the last 16-byte unit to zero
+ // arg2 = mem
+ // returns mem
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // Note: the-end-of-the-memory may not be a valid pointer. It's a problem if it is spilled.
+ // the-end-of-the-memory - 16 is within the area to zero, ok to spill.
+ {
+ name: "LoweredZero",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R16"), gp},
+ clobbers: buildReg("R16"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ // R20, R21 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R21"), buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R21 R26 R30"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // large move
+ // arg0 = address of dst memory (in R17 aka arm64.REGRT2, changed as side effect)
+ // arg1 = address of src memory (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // Note: the-end-of-src may not be a valid pointer. It's a problem if it is spilled.
+ // the-end-of-src - 8 is within the area to copy, ok to spill.
+ {
+ name: "LoweredMove",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R17"), buildReg("R16"), gp},
+ clobbers: buildReg("R16 R17"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in the entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R26 (arm64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // atomic loads.
+ // load from arg0. arg1=mem. auxint must be zero.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LDAR", argLength: 2, reg: gpload, asm: "LDAR", faultOnNilArg0: true},
+ {name: "LDARB", argLength: 2, reg: gpload, asm: "LDARB", faultOnNilArg0: true},
+ {name: "LDARW", argLength: 2, reg: gpload, asm: "LDARW", faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory. auxint must be zero.
+ {name: "STLRB", argLength: 3, reg: gpstore, asm: "STLRB", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic exchange variant.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // SWPALD Rarg1, (Rarg0), Rout
+ {name: "LoweredAtomicExchange64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add variant.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDADDAL (Rarg0), Rarg1, Rout
+ // ADD Rarg1, Rout
+ {name: "LoweredAtomicAdd64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap variant.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or variant.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // AND:
+ // MVN Rarg1, Rtemp
+ // LDANDALB Rtemp, (Rarg0), Rout
+ // AND Rarg1, Rout
+ // OR:
+ // LDORALB Rarg1, (Rarg0), Rout
+ // ORR Rarg1, Rout
+ {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R30 (LR) because it's a call.
+ // R16 and R17 may be clobbered by linker trampoline.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
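+ // For example, with the register choices above, the value c stays in R2 for
+ // both check A (as the index) and check B (as the length), and b stays in R1
+ // for checks B and C, so no extra moves are needed between consecutive checks.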
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "Z", controls: 1}, // Control == 0 (take a register instead of flags)
+ {name: "NZ", controls: 1}, // Control != 0
+ {name: "ZW", controls: 1}, // Control == 0, 32-bit
+ {name: "NZW", controls: 1}, // Control != 0, 32-bit
+ {name: "TBZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) == 0
+ {name: "TBNZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) != 0
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+ }
+
+ archs = append(archs, arch{
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
new file mode 100644
index 0000000..1a7eefa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -0,0 +1,600 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R11).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+var regNamesARM = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "g", // aka R10
+ "R11", // tmp
+ "R12",
+ "SP", // aka R13
+ "R14", // link
+ "R15", // pc
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15", // tmp
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}}
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt
+ {name: "RSB", argLength: 2, reg: gp21, asm: "RSB"}, // arg1 - arg0
+ {name: "RSBconst", argLength: 1, reg: gp11, asm: "RSB", aux: "Int32"}, // auxInt - arg0
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+
+ // udiv runtime call for soft division
+ // output0 = arg0/arg1, output1 = arg0%arg1
+ // see ../../../../../runtime/vlop_arm.s
+ {
+ name: "CALLudiv",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ outputs: []regMask{buildReg("R0"), buildReg("R1")},
+ clobbers: buildReg("R2 R3 R14"),
+ },
+ clobberFlags: true,
+ typ: "(UInt32,UInt32)",
+ call: false, // TODO(mdempsky): Should this be true?
+ },
+
+ {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+ {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+ {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+ {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+ {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+ {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+ {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+ {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+ {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+ {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+
+ {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
+ {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
+ {name: "MULS", argLength: 3, reg: gp31, asm: "MULS"}, // arg2 - arg0 * arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "NMULF", argLength: 2, reg: fp21, asm: "NMULF", commutative: true}, // -(arg0 * arg1)
+ {name: "NMULD", argLength: 2, reg: fp21, asm: "NMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "MULAF", argLength: 3, reg: fp31, asm: "MULAF", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULAD", argLength: 3, reg: fp31, asm: "MULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULSF", argLength: 3, reg: fp31, asm: "MULSF", resultInArg0: true}, // arg0 - (arg1 * arg2)
+ {name: "MULSD", argLength: 3, reg: fp31, asm: "MULSD", resultInArg0: true}, // arg0 - (arg1 * arg2)
+
+ // FMULAD only exists on platforms with the VFPv4 instruction set.
+ // Any use must be preceded by a successful check of runtime.arm_support_vfpv4.
+ {name: "FMULAD", argLength: 3, reg: fp31, asm: "FMULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt
+
+ // bit extraction, AuxInt = Width<<8 | LSB
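+ // For example, extracting a 4-bit field that starts at bit 3 is encoded as
+ // AuxInt = 4<<8 | 3 = 0x0403.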
+ {name: "BFX", argLength: 1, reg: gp11, asm: "BFX", aux: "Int32"}, // extract W bits from bit L in arg0, then signed extend
+ {name: "BFXU", argLength: 1, reg: gp11, asm: "BFXU", aux: "Int32"}, // extract W bits from bit L in arg0, then unsigned extend
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order
+ {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // reverse byte order in 16-bit halfwords
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, 0 <= auxInt < 32
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned, 0 <= auxInt < 32
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, 0 <= auxInt < 32
+ {name: "SRR", argLength: 2, reg: gp21}, // arg0 right rotate by arg1 bits
+ {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits, 0 <= auxInt < 32
+
+ // auxInt for all of these satisfies 0 <= auxInt < 32
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+ {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+ {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "XORshiftRR", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ (arg1 right rotate by auxInt)
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+ {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+ {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+ {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+ {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+ {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+ {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+ {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+ {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+ {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+ {name: "ADDSshiftLL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+ {name: "ADDSshiftRL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+ {name: "ADDSshiftRA", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+ {name: "SUBSshiftLL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+ {name: "SUBSshiftRL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+ {name: "SUBSshiftRA", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+ {name: "RSBSshiftLL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+ {name: "RSBSshiftRL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRA", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+ {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+ {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+ {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+ {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+ {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+ {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+ {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+ {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+ {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+ {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+ {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+ {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+ {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<<arg2
+ {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, unsigned shift
+ {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift
+ {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+ {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+ {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+ {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+ {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+ {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+ {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+ {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+ {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
+
+ {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+ {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+ {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+ {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+ {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+ {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+ {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+ {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+ {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
+
+ {name: "ADDSshiftLLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+ {name: "ADDSshiftRLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+ {name: "ADDSshiftRAreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+ {name: "SUBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+ {name: "SUBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+ {name: "SUBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+ {name: "RSBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+ {name: "RSBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TEQ", argLength: 2, reg: gp2flags, asm: "TEQ", typ: "Flags", commutative: true}, // arg0 ^ arg1 compare to 0
+ {name: "TEQconst", argLength: 1, reg: gp1flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ auxInt compare to 0
+ {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<<auxInt)
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift
+ {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<<auxInt) compare to 0
+ {name: "TEQshiftRL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift
+
+ {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+ {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+ {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+ {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<<arg2) compare to 0
+ {name: "CMNshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, unsigned shift
+ {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, signed shift
+ {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<<arg2) compare to 0
+ {name: "TSTshiftRLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift
+ {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<<arg2) compare to 0
+ {name: "TEQshiftRLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift
+
+ {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "UInt32"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+ {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+ {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load from arg0 + arg1. arg2=mem
+
+ {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+ {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+ {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+ {name: "MOVBstoreidx", argLength: 4, reg: gp2store, asm: "MOVB", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gp2store, asm: "MOVH", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVWUF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // uint32 -> float32, set U bit in the instruction
+ {name: "MOVWUD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // uint32 -> float64, set U bit in the instruction
+ {name: "MOVFW", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> int32
+ {name: "MOVDW", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> int32
+ {name: "MOVFWU", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> uint32, set U bit in the instruction
+ {name: "MOVDWU", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> uint32, set U bit in the instruction
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // conditional instructions, for lowering shifts
+ {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+ {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+ {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+
+ // duffzero (must be 4-byte aligned)
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = value to store (always zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ clobbers: buildReg("R1 R14"),
+ },
+ faultOnNilArg0: true,
+ },
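+ // Illustrative note: DUFFZERO is a Duff's-device entry into runtime.duffzero; the auxint selects
+ // how far into the unrolled store sequence execution starts, so jumping further in executes fewer
+ // stores. The exact offsets are chosen by the lowering rules and the runtime's duffzero layout
+ // (not shown here).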
+
+ // duffcopy (must be 4-byte aligned)
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R0 R1 R2 R14"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = value to store (always zero)
+ // arg3 = mem
+ // returns mem
+ // MOVW.P Rarg2, 4(R1)
+ // CMP R1, Rarg1
+ // BLE -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp, gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP R1, Rarg2
+ // BLE -3(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R7 (arm.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r0, r1}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+ }
+
+ archs = append(archs, arch{
+ name: "ARM",
+ pkg: "cmd/internal/obj/arm",
+ genfile: "../../arm/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
new file mode 100644
index 0000000..8ad2c90
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -0,0 +1,697 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Select0 (Add32carry <t> x y)) => (ADD <t.FieldType(0)> x y)
+(Select1 (Add32carry <t> x y)) => (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+(Add32withcarry <t> x y c) => (ADD c (ADD <t> x y))
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Select0 (Sub32carry <t> x y)) => (SUB <t.FieldType(0)> x y)
+(Select1 (Sub32carry <t> x y)) => (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+(Sub32withcarry <t> x y c) => (SUB (SUB <t> x y) c)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+
+(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
+(Mul32uhilo ...) => (MULTU ...)
+
+(Div32 x y) => (Select1 (DIV x y))
+(Div32u x y) => (Select1 (DIVU x y))
+(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) => (Select0 (DIV x y))
+(Mod32u x y) => (Select0 (DIVU x y))
+(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+
+// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
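+// Worked example of the identity above (x=7, y=3): (7+3)/2 = 5 and (7-3)/2 + 3 = 2 + 3 = 5.
+// Since x>=y, x-y cannot wrap, and x+y = (x-y) + 2y, so halving each side gives the same result
+// without needing a 33rd bit for x+y.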
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
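+// Illustrative example for the 16-bit case above: with c=3, (SRAconst (SLLconst x [16]) [19])
+// first places the 16-bit value in bits 16..31 (any junk in x's upper bits is shifted out), then
+// the arithmetic shift by 16+3 brings it back down sign-extended, i.e. the correct int16 >> 3.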
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// shifts
+// hardware instruction uses only the low 5 bits of the shift
+// we compare to 32 to ensure Go semantics for large shifts
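+// For instance, in the Lsh32x32 rule below, (SGTUconst [32] y) is 1 when y < 32 (unsigned) and 0
+// otherwise; CMOVZ then keeps the SLL result in the first case and selects the constant 0 in the
+// second, matching Go's "shift by >= width yields 0" semantics.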
+(Lsh32x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh32x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh32x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh16x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh16x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh16x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh8x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh8x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh8x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32Ux32 <t> x y) => (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh32Ux16 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh32Ux8 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh16Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh16Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh16Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh8Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh8Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh8Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32x32 x y) => (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh32x16 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh8x32 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh8x16 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+// rotates
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVWconst [c])) => (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+
+// unary ops
+(Neg(32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) x) => (NORconst [0] x)
+
+(Sqrt ...) => (SQRTD ...)
+
+// TODO: optimize this case?
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// count trailing zero
+// 32 - CLZ(x&-x - 1)
+(Ctz32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
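+// Worked example for the rule above: x = 8 (0b1000): x & -x = 8, 8-1 = 7 = 0b111,
+// CLZ(7) = 29, and 32 - 29 = 3, the number of trailing zeros of 8.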
+
+// bit length
+(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F ...) => (MOV(F|D)const ...)
+(ConstNil) => (MOVWconst [0])
+(ConstBool [b]) => (MOVWconst [b2i32(b)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [31])
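+// Illustrative: Signmask spreads the sign bit (0 or -1); Zeromask is 0 for x==0 and -1 otherwise,
+// since SGTU x 0 is the boolean x!=0 and NEG turns 1 into all ones; Slicemask likewise maps any
+// nonzero value to -1 and zero to 0 via the sign bit of its negation.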
+
+// float-int conversion
+(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
+(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (SGTUconst [1] (XOR x y))
+(EqPtr x y) => (SGTUconst [1] (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
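+// Illustrative: XOR of the two (extended) operands is zero exactly when they are equal, and
+// (SGTUconst [1] v) is the unsigned test 1 > v, i.e. v == 0, so the whole pattern is the boolean x == y.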
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
+(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
+(Less32 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+(Less32U x y) => (SGTU y x)
+
+(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (XORconst [1] (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (XORconst [1] (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVWconst [0])
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] ptr (MOVWconst [0])
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 16 || t.Alignment()%4 != 0) =>
+ (LoweredZero [int32(t.Alignment())]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ mem)
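+// For instance (assuming moveSize returns 4 for 4-byte alignment): a 20-byte, 4-byte-aligned Zero
+// becomes (LoweredZero [4] ptr (ADDconst ptr [16]) mem), where ptr+16 is the address of the last
+// word the loop will clear.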
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] dst (MOVWload [12] src mem)
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))))
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
+ (LoweredMove [int32(t.Alignment())]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)
+
+(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)
+
+(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd ...)
+
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
+(AtomicOr8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
+(AtomicAnd8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr)))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))))) mem)
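+// Worked example for the two little-endian rules above, with ptr & 3 == 1: the word address is
+// ptr &^ 3, the shift amount is (ptr&3)*8 = 8, so val lands in bits 8..15; for And8 the extra
+// OR-ed term ^(0xFF << 8) keeps the other three bytes of the word unchanged.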
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
+(AtomicOr8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
+(AtomicAnd8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr))))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
+(AtomicOr32 ...) => (LoweredAtomicOr ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVWconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTUzero x) yes no) => (NE x yes no)
+(EQ (SGTUzero x) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGTzero x) yes no) => (GTZ x yes no)
+(EQ (SGTzero x) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem)
+
+(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVWnop doesn't emit an instruction; it exists only to carry the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// fold constant into arithmetic ops
+(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(NOR x (MOVWconst [c])) => (NORconst [c] x)
+
+(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
+(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
+(SRA x (MOVWconst [c])) => (SRAconst x [c&31])
+
+(SGT (MOVWconst [c]) x) => (SGTconst [c] x)
+(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x)
+(SGT x (MOVWconst [0])) => (SGTzero x)
+(SGTU x (MOVWconst [0])) => (SGTUzero x)
+
+// mul with constant
+(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [1]) x )) => x
+(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
+(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
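+// Illustrative derivation for the [-1] cases above: (2^32-1)*x = x*2^32 - x, so the low word is -x,
+// and the high word is x-1 when x != 0 (because of the borrow) and 0 when x == 0, which is exactly
+// what the CMOVZ form computes.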
+
+(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
+(MUL (MOVWconst [1]) x ) => x
+(MUL (MOVWconst [-1]) x ) => (NEG x)
+(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVWconst [0])
+(SUB (MOVWconst [0]) x) => (NEG x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+
+// miscellaneous patterns generated by dec64
+(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR <x.Type> x y))
+(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR <x.Type> x y))
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVWconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))])
+(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c/d])
+(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c%d])
+(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
+(NEG (MOVWconst [c])) => (MOVWconst [-c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+
+// constant comparisons
+(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1])
+(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0])
+(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1])
+(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0])
+(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1])
+(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+
+// absorb constants into branches
+(EQ (MOVWconst [0]) yes no) => (First yes no)
+(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVWconst [0]) yes no) => (First no yes)
+(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes)
+
+// conditional move
+(CMOVZ _ f (MOVWconst [0])) => f
+(CMOVZ a _ (MOVWconst [c])) && c!=0 => a
+(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0])
+(CMOVZzero a (MOVWconst [c])) && c!=0 => a
+(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c)
+
+// atomic
+(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem)
+(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem)
+
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
new file mode 100644
index 0000000..088c9b1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -0,0 +1,678 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Mul64uhilo ...) => (MULVU ...)
+(Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
+(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+
+(Hmul64 x y) => (Select0 (MULV x y))
+(Hmul64u x y) => (Select0 (MULVU x y))
+(Hmul32 x y) => (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) => (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
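+// Illustrative: for MULV/MULVU, Select0 is the high and Select1 the low 64 bits of the product
+// (see the Hmul64 rules above), so for 32-bit operands the low 64 bits already hold the full
+// product and shifting it right by 32 extracts the high half.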
+
+(Div64 x y) => (Select1 (DIVV x y))
+(Div64u x y) => (Select1 (DIVVU x y))
+(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod64 x y) => (Select0 (DIVV x y))
+(Mod64u x y) => (Select0 (DIVVU x y))
+(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
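+// For instance, in the rules below (SGTU (MOVVconst [64]) y) is the boolean y < 64; NEGV turns it
+// into an all-ones or all-zero mask, so the AND either passes the SLLV/SRLV result through or
+// forces the result to 0 when the shift count is out of range.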
+(Lsh64x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+// rotates
+(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+(RotateLeft32 <t> x (MOVVconst [c])) => (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+(RotateLeft64 <t> x (MOVVconst [c])) => (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
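+// For example, with c=3 the RotateLeft8 rule above produces (x<<3) | (x>>5),
+// since c&7 == 3 and -c&7 == 5: a constant rotate is decomposed into a pair of
+// shifts combined with an OR rather than a dedicated rotate instruction.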
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEGV ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)
+
+(Sqrt ...) => (SQRTD ...)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
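+// EqB computes (1 XOR (x XOR y)): the inner XOR is 0 exactly when the two booleans
+// are equal, so the outer XOR with 1 yields 1 for equal inputs and 0 otherwise.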
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVVconst [0])
+(ConstBool [b]) => (MOVVconst [int64(b2i(b))])
+
+(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
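+// Slicemask(x) is 0 for x == 0 and all ones for x > 0: NEGV turns a positive x into
+// a negative value, and the arithmetic right shift by 63 then copies the sign bit
+// across the whole register.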
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt64to32F ...) => (MOVVF ...)
+(Cvt64to64F ...) => (MOVVD ...)
+(Cvt32Fto32 ...) => (TRUNCFW ...)
+(Cvt64Fto32 ...) => (TRUNCDW ...)
+(Cvt32Fto64 ...) => (TRUNCFV ...)
+(Cvt64Fto64 ...) => (TRUNCDV ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
+(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
+(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
+(Less64 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less64U x y) => (SGTU y x)
+
+(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDVconst [off] ptr)
+
+(Addr {sym} base) => (MOVVaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVVaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVVconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVVconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVVconst [0])
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore ptr (MOVVconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVVconst [0])
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVVconst [0])
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] ptr (MOVVconst [0])
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+
+// medium zeroing uses a duff device
+// 8 and 128 are magic constants; see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s > 24 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
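+// Here 8 is (assuming the mkduff.go layout) the size in bytes of each
+// store-and-advance block emitted for duffzero -- two 4-byte instructions that
+// clear 8 bytes -- and 128 is the number of such blocks, mirroring the DUFFCOPY
+// note further below.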
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore dst (MOVVload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] dst (MOVVload [16] src mem)
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem)))
+
+// medium move uses a duff device
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+// 16 and 128 are magic constants. 16 is the number of bytes to encode:
+// MOVV (R1), R23
+// ADDV $8, R1
+// MOVV R23, (R2)
+// ADDV $8, R2
+// and 128 is the number of such blocks. See runtime/duff_mips64x.s:duffcopy.
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
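+// Note the asymmetry: IsInBounds requires idx < len via (SGTU len idx), while
+// IsSliceInBounds permits idx == len, computing !(idx > len), i.e. idx <= len.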
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVVnop doesn't emit an instruction; it only ensures the type.
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
+
+// fold constant into arithmetic ops
+(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
+(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
+
+(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)
+
+// mul by constant
+(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
+(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
+(Select1 (MULVU x (MOVVconst [1]))) => x
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
+
+// div by constant
+(Select1 (DIVVU x (MOVVconst [1]))) => x
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
+(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
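+// For unsigned x and a power-of-two c, x/c == x>>log2(c) and x%c == x&(c-1), which
+// is what the SRLVconst and ANDconst forms above implement.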
+
+// generic simplifications
+(ADDV x (NEGV y)) => (SUBV x y)
+(SUBV x x) => (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) => (NEGV x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVVconst [0])
+
+// remove redundant *const ops
+(ADDVconst [0] x) => x
+(SUBVconst [0] x) => x
+(ANDconst [0] _) => (MOVVconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
+(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
+(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
+(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
+(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
+(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
+
+// constant comparisons
+(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+
+// absorb constants into branches
+(EQ (MOVVconst [0]) yes no) => (First yes no)
+(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVVconst [0]) yes no) => (First no yes)
+(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
new file mode 100644
index 0000000..e1e3933
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
@@ -0,0 +1,482 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// V (vlong) = 64 bit
+// WU (word) = 32 bit unsigned
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand coding regmasks.
+var regNamesMIPS64 = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ // R23 = REGTMP not used in regalloc
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ // R28 = REGSB not used in regalloc
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // aka REGLINK
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ //fp1flags = regInfo{inputs: []regMask{fp}}
+ //fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ //gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
+ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
+ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
+ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
+ {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV", typ: "(Int64,Int64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+ {name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU", typ: "(UInt64,UInt64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
+
+ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+
+ // shifts
+ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0
+
+ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
+ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64
+ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R1 aka mips.REGRT1 changed as side effect
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ clobbers: buildReg("R1 R31"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R1 R2 R31"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ // store zero to arg0. arg1=mem. returns memory.
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVV Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ // *arg0 += auxint. arg1=mem. returns <new content of *arg0, memory>. auxint is 32-bit.
+ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVV $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call
+ // and R23 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ gpg) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS64",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS64,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
new file mode 100644
index 0000000..75ab99e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
@@ -0,0 +1,439 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Unused portions of AuxInt are filled by sign-extending the used portion.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand coding regmasks.
+var regNamesMIPS = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ // R23 = REGTMP not used in regalloc
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ "R28",
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // REGLINK
+
+ // odd FP registers contain high parts of 64-bit FP values
+ "F0",
+ "F2",
+ "F4",
+ "F6",
+ "F8",
+ "F10",
+ "F12",
+ "F14",
+ "F16",
+ "F18",
+ "F20",
+ "F22",
+ "F24",
+ "F26",
+ "F28",
+ "F30",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADDU", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADDU", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUBU"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUBU", aux: "Int32"}, // arg0 - auxInt
+ {name: "MUL", argLength: 2, reg: regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: hi | lo}, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULT", argLength: 2, reg: gp2hilo, asm: "MUL", commutative: true, typ: "(Int32,Int32)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULTU", argLength: 2, reg: gp2hilo, asm: "MULU", commutative: true, typ: "(UInt32,UInt32)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIV", argLength: 2, reg: gp2hilo, asm: "DIV", typ: "(Int32,Int32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+ {name: "DIVU", argLength: 2, reg: gp2hilo, asm: "DIVU", typ: "(UInt32,UInt32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt)
+
+ {name: "NEG", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 32
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 32
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, shift amount must be 0 through 31 inclusive
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"},
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTzero", argLength: 1, reg: gp11, asm: "SGT", typ: "Bool"}, // 1 if arg0 > 0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+ {name: "SGTUzero", argLength: 1, reg: gp11, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > 0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float32", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // conditional move on zero (returns arg1 if arg2 is 0, otherwise arg0)
+ // order of parameters is reversed so we can use resultInArg0 (OpCMOVZ result arg1 arg2 -> CMOVZ arg2reg, arg1reg, resultReg)
+ {name: "CMOVZ", argLength: 3, reg: gp31, asm: "CMOVZ", resultInArg0: true},
+ {name: "CMOVZzero", argLength: 2, reg: regInfo{inputs: []regMask{gp, gpg}, outputs: []regMask{gp}}, asm: "CMOVZ", resultInArg0: true},
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // atomic ops
+
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ // SYNC
+ // MOV(B|W) (Rarg0), Rout
+ // SYNC
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // store arg1 to arg0. arg2=mem. returns memory.
+ // SYNC
+ // MOV(B|W) Rarg1, (Rarg0)
+ // SYNC
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVW Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ {name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVW $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory.
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int32",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int32",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call
+ // and R23 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ gpg) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r3, r4}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
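The opData tables above are input to the generator in this gen directory, which emits
Op constants and register constraints into ../opGen.go; the .rules files (such as the
one below) are likewise translated into rewrite functions that construct those ops.
A rough Go sketch of what such generated code does with one entry (identifiers b, pos,
x, y and typ are assumptions for illustration, not taken from this patch):

	// Build a lowered MIPS ADD value in block b from two operands.
	v := b.NewValue0(pos, OpMIPSADD, typ.UInt32)
	v.AddArg(x) // arg0
	v.AddArg(y) // arg1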
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
new file mode 100644
index 0000000..c064046
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -0,0 +1,1461 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add64F ...) => (FADD ...)
+(Add32F ...) => (FADDS ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUB ...)
+
+// Combine 64 bit integer multiply and adds
+(ADD l:(MULLD x y) z) && objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
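+// e.g. the Go expression x*y + z (all int64) becomes a single MADDLD on POWER9.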
+
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod64 x y) && objabi.GOPPC64 >= 9 => (MODSD x y)
+(Mod64 x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) && objabi.GOPPC64 >= 9 => (MODUD x y)
+(Mod64u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) && objabi.GOPPC64 >= 9 => (MODSW x y)
+(Mod32 x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) && objabi.GOPPC64 >= 9 => (MODUW x y)
+(Mod32u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
+
+// (x + y) / 2 with x>=y is rewritten as (x - y) / 2 + y so the intermediate sum cannot overflow
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+
+(Add64carry ...) => (LoweredAdd64Carry ...)
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (DIVWU ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
+
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+// Lowering float <=> int
+(Cvt32to32F x) => (FCFIDS (MTVSRD (SignExt32to64 x)))
+(Cvt32to64F x) => (FCFID (MTVSRD (SignExt32to64 x)))
+(Cvt64to32F x) => (FCFIDS (MTVSRD x))
+(Cvt64to64F x) => (FCFID (MTVSRD x))
+
+(Cvt32Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt32Fto64 x) => (MFVSRD (FCTIDZ x))
+(Cvt64Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt64Fto64 x) => (MFVSRD (FCTIDZ x))
+
+(Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
+(Cvt64Fto32F ...) => (FRSP ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+(Sqrt ...) => (FSQRT ...)
+(Floor ...) => (FFLOOR ...)
+(Ceil ...) => (FCEIL ...)
+(Trunc ...) => (FTRUNC ...)
+(Round ...) => (FROUND ...)
+(Copysign x y) => (FCPSGN y x)
+(Abs ...) => (FABS ...)
+(FMA ...) => (FMADD ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
+(Trunc(16|32|64)to8 x) => (MOVBZreg x)
+(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
+(Trunc(32|64)to16 x) => (MOVHZreg x)
+(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
+(Trunc64to32 x) => (MOVWZreg x)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [b]) => (MOVDconst [b2i(b)])
+
+// Constant folding
+(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
+(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
+(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
+(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
+(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
+
+// Rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x)
+(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x)
+
+// Rotate generation with const shift
+(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+
+(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+
+// Rotate generation with non-const shift
+// these match patterns from math/bits/RotateLeft[32|64], but there could be others
+(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+
+
+(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
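+// For reference, the source pattern these rules target is the body of
+// math/bits.RotateLeft64 (sketch; the 32-bit case has the same shape):
+//
+//	s := uint(k) & 63
+//	return x<<s | x>>(64-s)
+//
+// which lowers to the SLD/SRD/ANDconst/SUB(FCconst) shapes matched above and is
+// collapsed here into a single ROTL (ROTLW for 32 bits).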
+
+
+// Lowering rotates
+(RotateLeft32 x y) => (ROTLW x y)
+(RotateLeft64 x y) => (ROTL x y)
+
+// Constant rotate generation
+(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
+(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
+
+// Combine rotate and mask operations
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+
+// Note, any rotated word bitmask is still a valid word bitmask.
+(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+
+// Merge shift right + shift left and clear left (e.g. for a table lookup)
+(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
+// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF].
+(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+
+// large constant shifts
+(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
+(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 => (SRAWconst (SignExt8to32 x) [63])
+
+// constant shifts
+(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SLDconst x [c])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SRADconst x [c])
+(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 => (SRDconst x [c])
+(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SLWconst x [c])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SRAWconst x [c])
+(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 => (SRWconst x [c])
+(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
+(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
+(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
+(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
+
+(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c&63])
+(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c&63])
+(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c&63])
+(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c&31])
+(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c&31])
+(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c&31])
+(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c&31])
+(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c&15])
+(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c&15])
+(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c&7])
+(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SRAWconst (SignExt8to32 x) [c&7])
+(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 => (SRWconst (ZeroExt8to32 x) [c&7])
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
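+// A shift is bounded when an earlier phase (e.g. the prove pass) has shown the
+// count to be less than the operand width, as in this Go sketch (names illustrative):
+//
+//	if s < 64 {
+//		r = x << s // lowers directly to SLD, no clamping of the count needed
+//	}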
+
+// non-constant rotates
+// These are subexpressions found in statements that can become rotates.
+// In these cases the shift count is known to be < 64, so the more complicated
+// expressions with Mask & Carry are not needed.
+(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
+(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+
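+// General case: the count is not known to be in range. ISEL [0] selects y when the
+// CMPU finds y < width, and -1 otherwise; a count of -1 makes the machine shift move
+// every bit out, so oversized shifts yield 0 (all sign bits for SRAD/SRAW).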
+(Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux64 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+
+(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (ANDconst <typ.Int32> [31] y))
+(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) => (SLW x (ANDconst <typ.Int32> [31] y))
+
+(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
+(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
+(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x64 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux64 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x64 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux64 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+(Rsh64x32 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux32 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Lsh64x32 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh32x32 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux32 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x32 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux32 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x32 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux32 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+
+(Rsh64x16 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Rsh64Ux16 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Lsh64x16 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+
+(Rsh32x16 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Rsh32Ux16 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Lsh32x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+
+(Rsh16x16 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Rsh16Ux16 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Lsh16x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+
+(Rsh8x16 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Rsh8Ux16 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Lsh8x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+
+
+(Rsh64x8 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Rsh64Ux8 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Lsh64x8 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+
+(Rsh32x8 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Rsh32Ux8 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Lsh32x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+
+(Rsh16x8 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Rsh16Ux8 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Lsh16x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+
+(Rsh8x8 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+
+// Cleaning up shift ops
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
+(ORN x (MOVDconst [-1])) => x
+
+(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
+(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
+
+(Addr {sym} base) => (MOVDaddr {sym} [0] base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
+
+// TODO: optimize these cases?
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) && objabi.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+(Ctz64 x) => (CNTTZD x)
+(Ctz32 x) && objabi.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+(Ctz32 x) => (CNTTZW (MOVWZreg x))
+(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+
+(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
+(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
+
+(PopCount64 ...) => (POPCNTD ...)
+(PopCount32 x) => (POPCNTW (MOVWZreg x))
+(PopCount16 x) => (POPCNTW (MOVHZreg x))
+(PopCount8 x) => (POPCNTB (MOVBZreg x))
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg64F ...) => (FNEG ...)
+(Neg32F ...) => (FNEG ...)
+
+(Com(64|32|16|8) x) => (NOR x x)
+
+// Lowering boolean ops
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(Not x) => (XORconst [1] x)
+
+// Use ANDN for AND x NOT y
+(AND x (NOR y y)) => (ANDN x y)
+
+// Lowering comparisons
+(EqB x y) => (ANDconst [1] (EQV x y))
+// Choosing sign or zero extension based on the operands' signedness sets up for sign/zero-extension elision later
+(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPU x y))
+(Eq64F x y) => (Equal (FCMPU x y))
+(EqPtr x y) => (Equal (CMP x y))
+
+(NeqB ...) => (XOR ...)
+// Like Eq8 and Eq16, prefer sign extension, which is likely to enable later elision.
+(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPU x y))
+(Neq64F x y) => (NotEqual (FCMPU x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+(Less32F x y) => (FLessThan (FCMPU x y))
+(Less64F x y) => (FLessThan (FCMPU x y))
+
+(Less8U x y) => (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThan (CMPWU x y))
+(Less64U x y) => (LessThan (CMPU x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+(Leq32F x y) => (FLessEqual (FCMPU x y))
+(Leq64F x y) => (FLessEqual (FCMPU x y))
+
+(Leq8U x y) => (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqual (CMPWU x y))
+(Leq64U x y) => (LessEqual (CMPU x y))
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (FLessThan cc) yes no) => (FLT cc yes no)
+(If (FLessEqual cc) yes no) => (FLE cc yes no)
+(If (FGreaterThan cc) yes no) => (FGT cc yes no)
+(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NE (CMPWconst [0] cond) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPWconst [0] (Equal cc)) yes no) => (EQ cc yes no)
+(NE (CMPWconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
+(NE (CMPWconst [0] (LessThan cc)) yes no) => (LT cc yes no)
+(NE (CMPWconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
+(NE (CMPWconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
+(NE (CMPWconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
+(NE (CMPWconst [0] (FLessThan cc)) yes no) => (FLT cc yes no)
+(NE (CMPWconst [0] (FLessEqual cc)) yes no) => (FLE cc yes no)
+(NE (CMPWconst [0] (FGreaterThan cc)) yes no) => (FGT cc yes no)
+(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) => (FGE cc yes no)
+
+// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
+(EQ (CMPconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
+(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPWconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT) yes no) => (First no yes)
+(EQ (FlagGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT) yes no) => (First yes no)
+(NE (FlagGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT) yes no) => (First yes no)
+(LT (FlagGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT) yes no) => (First yes no)
+(LE (FlagGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT) yes no) => (First no yes)
+(GT (FlagGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT) yes no) => (First no yes)
+(GE (FlagGT) yes no) => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// constant comparisons
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+
+(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y => (FlagGT)
+
+(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMPUconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+// other known comparisons
+//(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagLT)
+//(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagLT)
+//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) => (FlagLT)
+//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagLT)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) => (MOVDconst [1])
+(Equal (FlagLT)) => (MOVDconst [0])
+(Equal (FlagGT)) => (MOVDconst [0])
+
+(NotEqual (FlagEQ)) => (MOVDconst [0])
+(NotEqual (FlagLT)) => (MOVDconst [1])
+(NotEqual (FlagGT)) => (MOVDconst [1])
+
+(LessThan (FlagEQ)) => (MOVDconst [0])
+(LessThan (FlagLT)) => (MOVDconst [1])
+(LessThan (FlagGT)) => (MOVDconst [0])
+
+(LessEqual (FlagEQ)) => (MOVDconst [1])
+(LessEqual (FlagLT)) => (MOVDconst [1])
+(LessEqual (FlagGT)) => (MOVDconst [0])
+
+(GreaterThan (FlagEQ)) => (MOVDconst [0])
+(GreaterThan (FlagLT)) => (MOVDconst [0])
+(GreaterThan (FlagGT)) => (MOVDconst [1])
+
+(GreaterEqual (FlagEQ)) => (MOVDconst [1])
+(GreaterEqual (FlagLT)) => (MOVDconst [0])
+(GreaterEqual (FlagGT)) => (MOVDconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+
+// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
+
+(CondSelect x y bool) && flagArg(bool) != nil => (ISEL [2] x y bool)
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [2] x y (CMPWconst [0] bool))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
+(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) => (MOVBZload ptr mem)
+
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is32BitFloat(val.Type) => (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) => x -- type is wrong
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitInt(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Using Zero instead of LoweredZero allows the
+// target address to be folded where possible.
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
+(Zero [2] destptr mem) =>
+ (MOVHstorezero destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstorezero [2] destptr
+ (MOVHstorezero destptr mem))
+(Zero [4] destptr mem) =>
+ (MOVWstorezero destptr mem)
+(Zero [5] destptr mem) =>
+ (MOVBstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVBstorezero [6] destptr
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem)))
+
+// MOVD stores use the DS instruction form, so their offsets must be a multiple of 4
+(Zero [8] {t} destptr mem) && t.Alignment()%4 == 0 =>
+ (MOVDstorezero destptr mem)
+(Zero [8] destptr mem) =>
+ (MOVWstorezero [4] destptr
+ (MOVWstorezero [0] destptr mem))
+// Handle these cases only if aligned properly, otherwise use general case below
+(Zero [12] {t} destptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [16] {t} destptr mem) && t.Alignment()%4 == 0 =>
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [24] {t} destptr mem) && t.Alignment()%4 == 0 =>
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem)))
+(Zero [32] {t} destptr mem) && t.Alignment()%4 == 0 =>
+ (MOVDstorezero [24] destptr
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))))
+
+// Handle cases not handled above
+// Lowered Short cases do not generate loops, and as a result don't clobber
+// the address registers or flags.
+(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s < 128 && objabi.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && objabi.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
+
+// moves
+// Only the MOVD and MOVW instructions require 4 byte
+// alignment in the offset field. The other MOVx instructions
+// allow any alignment.
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) =>
+ (MOVWstore dst (MOVWZload src mem) mem)
+// MOVD for load and store must have offsets that are a multiple of 4
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [8] dst src mem) =>
+ (MOVWstore [4] dst (MOVWZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// Large move uses a loop. Since the address is computed and the
+// offset is zero, any alignment can be used.
+(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) =>
+ (LoweredMove [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && s <= 64 && objabi.GOPPC64 >= 9 =>
+ (LoweredQuadMoveShort [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) =>
+ (LoweredQuadMove [s] dst src mem)
+
+// Calls
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// Miscellaneous
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThan (CMPU idx len))
+(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
+// so ORconst, XORconst easily expand into a pair.
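+// (Sketch, not part of these rules: a constant such as 0x00120056 has nonzero bits in both
+// 16-bit halves, so it can be emitted as an or-immediate-shifted of the upper half followed
+// by an or-immediate of the lower half; that expansion is done by the assembler.)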
+
+// Include very-large constants in the const-const case.
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
+
+// Discover consts
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
+(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
+(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
+
+// Simplify consts
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(XORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORconst [0] x) => x
+
+// zero-extend of small and => small and
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
+(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
+
+// sign extend of small-positive and => small-positive-and
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
+(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
+
+// small and of zero-extend => either zero-extend or small and
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] y:(MOVBreg _)) => y
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] y:(MOVHreg _)) => y
+
+(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
+(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
+// normal case
+(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x)
+
+// Eliminate unnecessary sign/zero extend following right shift
+(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
+(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
+(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
+(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
+(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
+(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
+
+(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 => (SRWconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 => (SRWconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 => (SRWconst [c] x)
+(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 => (SRAWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 => (SRAWconst [c] x)
+
+// initial right shift will handle sign/zero extend
+(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
+(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
+
+(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
+(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
+
+(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
+(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
+
+// Various redundant zero/sign extension combinations.
+(MOVBZreg y:(MOVBZreg _)) => y // repeat
+(MOVBreg y:(MOVBreg _)) => y // repeat
+(MOVBreg (MOVBZreg x)) => (MOVBreg x)
+(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
+
+// H - there are more combinations than these
+
+(MOVHZreg y:(MOVHZreg _)) => y // repeat
+(MOVHZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVHZreg y:(MOVHBRload _ _)) => y
+
+(MOVHreg y:(MOVHreg _)) => y // repeat
+(MOVHreg y:(MOVBreg _)) => y // wide of narrow
+
+(MOVHreg y:(MOVHZreg x)) => (MOVHreg x)
+(MOVHZreg y:(MOVHreg x)) => (MOVHZreg x)
+
+// W - there are more combinations than these
+
+(MOVWZreg y:(MOVWZreg _)) => y // repeat
+(MOVWZreg y:(MOVHZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVHBRload _ _)) => y
+(MOVWZreg y:(MOVWBRload _ _)) => y
+
+(MOVWreg y:(MOVWreg _)) => y // repeat
+(MOVWreg y:(MOVHreg _)) => y // wide of narrow
+(MOVWreg y:(MOVBreg _)) => y // wide of narrow
+
+(MOVWreg y:(MOVWZreg x)) => (MOVWreg x)
+(MOVWZreg y:(MOVWreg x)) => (MOVWZreg x)
+
+// Truncate, then logical op, then truncate: the first truncate can be omitted when the final one is of lesser or equal width
+(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+
+(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
+(MOVBZreg z:(AND y (MOVBZload ptr x))) => z
+(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
+(MOVHZreg z:(AND y (MOVHZload ptr x))) => z
+(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
+(MOVWZreg z:(AND y (MOVWZload ptr x))) => z
+
+// Arithmetic constant ops
+
+(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
+(ADDconst [0] x) => x
+(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
+
+(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
+(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
+
+(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
+
+// Subtract from a constant (with carry, which is ignored).
+// Note that these clobber the carry bit.
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
+(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
+(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
+(SUBFCconst [0] x) => (NEG x)
+(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
+(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
+(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
+
+// Use register moves instead of stores and loads to move int<=>float values.
+// Common with math.Float64bits and math.Float64frombits.
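+// For example (a sketch of the source-level pattern, not a rule itself):
+//   bits := math.Float64bits(f)  // can lower to a float store followed by an integer reload of the same stack slot
+// The rules below replace that store/load round trip with a direct register move (MFVSRD/MTVSRD).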
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
+
+(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
+(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
+
+(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
+(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
+
+(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
+(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
+
+// Fold offsets for stores.
+(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDstore [off1+int32(off2)] {sym} x val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} x val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} x val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} x val mem)
+
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+
+// Fold address into load/store.
+// The assembler needs to generate several instructions and use a
+// temp register to access a global, and it reloads the temp register
+// each time. So don't fold the address of a global unless there
+// is only one use.
+(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// Fold offsets for loads.
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVDload [off1+int32(off2)] {sym} x mem)
+(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 => (MOVWload [off1+int32(off2)] {sym} x mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWZload [off1+int32(off2)] {sym} x mem)
+(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} x mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHZload [off1+int32(off2)] {sym} x mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVBZload [off1+int32(off2)] {sym} x mem)
+
+// Determine load + addressing that can be done as a register indexed load
+(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
+
+// Determine indexed loads with constant values that can be done without index
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+
+// Store of zero => storezero
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+
+// Fold offsets for storezero
+(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0 =>
+ (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} x mem)
+
+// Stores with addressing that can be done as indexed stores
+(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
+
+// Stores with constant index values can be done without indexed instructions
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+
+// Fold symbols into storezero
+(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
+(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
+
+(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
+(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
+//(AtomicStorePtrNoWB ptr val mem) => (STLR ptr val mem)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
+(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
+
+(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+
+// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
+// This may interact with other patterns in the future. (Compare with arm64.)
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
+(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
+(MOV(H|W)reg x:(MOVHload _ _)) => x
+(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
+(MOVWZreg x:(MOVWZload _ _)) => x
+(MOVWZreg x:(MOVWZloadidx _ _ _)) => x
+(MOVWreg x:(MOVWload _ _)) => x
+(MOVWreg x:(MOVWloadidx _ _ _)) => x
+
+// don't extend if argument is already extended
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) => x
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) => x
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) => x
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) => x
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) => x
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) => x
+
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Implement the clrsldi and clrslwi extended mnemonics as described in
+// ISA 3.0 section C.8. The AuxInt field contains the values needed by
+// the instructions, packed together since only one aux field is available.
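+// For example, in the first rule below, a left shift by c of a zero-extended byte becomes
+// CLRLSLDI with newPPC64ShiftAuxInt(c, 56, 63, 64): shift amount, first and last mask bit,
+// and word size packed into the one aux field (the field order here is inferred from usage).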
+(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+// special case for power9
+(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
+
+// Lose widening ops fed to stores
+(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore {sym} ptr x mem)
+(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore {sym} ptr x mem)
+
+// Lose W-widening ops fed to compare-W
+(CMPW x (MOVWreg y)) => (CMPW x y)
+(CMPW (MOVWreg x) y) => (CMPW x y)
+(CMPWU x (MOVWZreg y)) => (CMPWU x y)
+(CMPWU (MOVWZreg x) y) => (CMPWU x y)
+
+(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
+(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
+(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
+(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
+
+(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
+(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
+(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
+(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
+// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
+// ISELB special case where arg0, arg1 values are 0, 1
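+// For example, (Equal cmp) below lowers to (ISELB [2] (MOVDconst [1]) cmp): select the
+// constant 1 when the EQ condition of cmp holds, and 0 otherwise.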
+
+(Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
+(NotEqual cmp) => (ISELB [6] (MOVDconst [1]) cmp)
+(LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
+(GreaterEqual cmp) => (ISELB [4] (MOVDconst [1]) cmp)
+(GreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
+(LessEqual cmp) => (ISELB [5] (MOVDconst [1]) cmp)
+
+(ISELB [0] _ (FlagLT)) => (MOVDconst [1])
+(ISELB [0] _ (Flag(GT|EQ))) => (MOVDconst [0])
+(ISELB [1] _ (FlagGT)) => (MOVDconst [1])
+(ISELB [1] _ (Flag(LT|EQ))) => (MOVDconst [0])
+(ISELB [2] _ (FlagEQ)) => (MOVDconst [1])
+(ISELB [2] _ (Flag(LT|GT))) => (MOVDconst [0])
+(ISELB [4] _ (FlagLT)) => (MOVDconst [0])
+(ISELB [4] _ (Flag(GT|EQ))) => (MOVDconst [1])
+(ISELB [5] _ (FlagGT)) => (MOVDconst [0])
+(ISELB [5] _ (Flag(LT|EQ))) => (MOVDconst [1])
+(ISELB [6] _ (FlagEQ)) => (MOVDconst [0])
+(ISELB [6] _ (Flag(LT|GT))) => (MOVDconst [1])
+
+(ISEL [2] x _ (FlagEQ)) => x
+(ISEL [2] _ y (Flag(LT|GT))) => y
+
+(ISEL [6] _ y (FlagEQ)) => y
+(ISEL [6] x _ (Flag(LT|GT))) => x
+
+(ISEL [0] _ y (Flag(EQ|GT))) => y
+(ISEL [0] x _ (FlagLT)) => x
+
+(ISEL [5] _ x (Flag(EQ|LT))) => x
+(ISEL [5] y _ (FlagGT)) => y
+
+(ISEL [1] _ y (Flag(EQ|LT))) => y
+(ISEL [1] x _ (FlagGT)) => x
+
+(ISEL [4] x _ (Flag(EQ|GT))) => x
+(ISEL [4] _ y (FlagLT)) => y
+
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
+
+// A particular pattern seen in cgo code:
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
+
+// floating point negative abs
+(FNEG (FABS x)) => (FNABS x)
+(FNEG (FNABS x)) => (FABS x)
+
+// floating-point fused multiply-add/sub
+(FADD (FMUL x y) z) => (FMADD x y z)
+(FSUB (FMUL x y) z) => (FMSUB x y z)
+(FADDS (FMULS x y) z) => (FMADDS x y z)
+(FSUBS (FMULS x y) z) => (FMSUBS x y z)
+
+
+// The following rules match statements found in the encoding/binary functions UintXX (load) and PutUintXX (store),
+// converting the multiple single-byte loads or stores in those functions into
+// the single largest possible load or store.
+// Some are marked big or little endian based on the order in which the bytes are loaded or stored,
+// not on the byte order of the machine. These are intended for little endian machines.
+// To implement them for big endian machines, most rules would have to be duplicated but the
+// resulting rule would be reversed, i.e., MOVHZload on little endian would be MOVHBRload on big endian
+// and vice versa.
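+// For example (a sketch of the Go-level pattern these rules recognize):
+//   v := uint16(b[0]) | uint16(b[1])<<8  // binary.LittleEndian.Uint16
+// lowers to an OR of two MOVBZloads, which the first rule below merges into a single MOVHZload.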
+// b[0] | b[1]<<8 => load 16-bit Little endian
+(OR <t> x0:(MOVBZload [i0] {s} p mem)
+ o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && x0.Uses ==1 && x1.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, o1)
+ => @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+
+// b[0]<<8 | b[1] => load 16-bit Big endian on Little endian arch.
+// Use byte-reverse indexed load for 2 bytes.
+(OR <t> x0:(MOVBZload [i1] {s} p mem)
+ o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && x0.Uses ==1 && x1.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, o1)
+ => @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<(n+8) | b[1]<<n => load 16-bit Big endian (where n%8 == 0)
+// Use byte-reverse indexed load for 2 bytes,
+// then shift left to the correct position. Used to match subrules
+// from longer rules.
+(OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
+ s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && n1%8 == 0
+ && n2 == n1+8
+ && x0.Uses == 1 && x1.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, s0, s1)
+ => @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit Little endian
+// Use byte-reverse indexed load for 4 bytes.
+(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
+ x0:(MOVHZload [i0] {s} p mem)))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Could be used to match subrules of a longer rule.
+(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
+ x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Could be used to match subrules of a longer rule.
+(OR <t> x0:(MOVBZload [i3] {s} p mem)
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
+ s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Used to match longer rules.
+(OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
+ s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, s2, o0)
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+
+// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with constant address.
+// Used to match longer rules.
+(OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
+ s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, s2, o0)
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4] <<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit Little endian
+// Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+// Offset must be a multiple of 4 for MOVD
+(OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
+ o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
+ o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
+ o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
+ x0:(MOVWZload {s} [i0] p mem)))))
+ && !config.BigEndian
+ && i0%4 == 0
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1
+ && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
+ && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
+ && mergePoint(b, x0, x4, x5, x6, x7) != nil
+ && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
+ => @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit Big endian ordered bytes on Little endian arch
+// Use byte-reverse indexed load of 8 bytes.
+// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+(OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
+ o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
+ o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
+ x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && mergePoint(b, x0, x1, x2, x3, x4) != nil
+ && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit Big endian ordered bytes on Little endian arch
+// Use byte-reverse indexed load of 8 bytes.
+// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+(OR <t> x7:(MOVBZload [i7] {s} p mem)
+ o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
+ o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
+ o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
+ s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ && !config.BigEndian
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
+ && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
+ && mergePoint(b, x3, x4, x5, x6, x7) != nil
+ && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ => @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// 2 byte store Little endian as in:
+// b[0] = byte(v >> 16)
+// b[1] = byte(v >> 24)
+// Added for use in matching longer rules.
+(MOVBstore [i1] {s} p (SR(W|D)const w [24])
+ x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+
+// 2 byte store Little endian as in:
+// b[0] = byte(v)
+// b[1] = byte(v >> 8)
+(MOVBstore [i1] {s} p (SR(W|D)const w [8])
+ x0:(MOVBstore [i0] {s} p w mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHstore [i0] {s} p w mem)
+
+// 4 byte store Little endian as in:
+// b[0:1] = uint16(v)
+// b[2:3] = uint16(v >> 16)
+(MOVHstore [i1] {s} p (SR(W|D)const w [16])
+ x0:(MOVHstore [i0] {s} p w mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+2
+ && clobber(x0)
+ => (MOVWstore [i0] {s} p w mem)
+
+// 4 byte store Big endian as in:
+// b[0] = byte(v >> 24)
+// b[1] = byte(v >> 16)
+// b[2] = byte(v >> 8)
+// b[3] = byte(v)
+// Use byte-reverse indexed 4 byte store.
+(MOVBstore [i3] {s} p w
+ x0:(MOVBstore [i2] {s} p (SRWconst w [8])
+ x1:(MOVBstore [i1] {s} p (SRWconst w [16])
+ x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
+ && !config.BigEndian
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && i1 == i0+1 && i2 == i0+2 && i3 == i0+3
+ && clobber(x0, x1, x2)
+ => (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+
+// The 2 byte store appears after the 4 byte store so that the
+// match for the 2 byte store is not done first.
+// If the 4 byte store is based on the 2 byte store then there are
+// variations on the MOVDaddr subrule that would require additional
+// rules to be written.
+
+// 2 byte store Big endian as in:
+// b[0] = byte(v >> 8)
+// b[1] = byte(v)
+(MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+
+// 8 byte store Little endian as in:
+// b[0] = byte(v)
+// b[1] = byte(v >> 8)
+// b[2] = byte(v >> 16)
+// b[3] = byte(v >> 24)
+// b[4] = byte(v >> 32)
+// b[5] = byte(v >> 40)
+// b[6] = byte(v >> 48)
+// b[7] = byte(v >> 56)
+// Built on previously defined rules
+// Offset must be a multiple of 4 for MOVDstore
+(MOVBstore [i7] {s} p (SRDconst w [56])
+ x0:(MOVBstore [i6] {s} p (SRDconst w [48])
+ x1:(MOVBstore [i5] {s} p (SRDconst w [40])
+ x2:(MOVBstore [i4] {s} p (SRDconst w [32])
+ x3:(MOVWstore [i0] {s} p w mem)))))
+ && !config.BigEndian
+ && i0%4 == 0
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
+ && clobber(x0, x1, x2, x3)
+ => (MOVDstore [i0] {s} p w mem)
+
+// 8 byte store Big endian as in:
+// b[0] = byte(v >> 56)
+// b[1] = byte(v >> 48)
+// b[2] = byte(v >> 40)
+// b[3] = byte(v >> 32)
+// b[4] = byte(v >> 24)
+// b[5] = byte(v >> 16)
+// b[6] = byte(v >> 8)
+// b[7] = byte(v)
+// Use byte-reverse indexed 8 byte store.
+(MOVBstore [i7] {s} p w
+ x0:(MOVBstore [i6] {s} p (SRDconst w [8])
+ x1:(MOVBstore [i5] {s} p (SRDconst w [16])
+ x2:(MOVBstore [i4] {s} p (SRDconst w [24])
+ x3:(MOVBstore [i3] {s} p (SRDconst w [32])
+ x4:(MOVBstore [i2] {s} p (SRDconst w [40])
+ x5:(MOVBstore [i1] {s} p (SRDconst w [48])
+ x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
+ && !config.BigEndian
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
+ && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
new file mode 100644
index 0000000..f7198b9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -0,0 +1,717 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Less-than-64-bit integer types live in the low portion of registers.
+// For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet.
+// - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses the
+// tmp register (R31).
+
+var regNamesPPC64 = []string{
+ "R0", // REGZERO, not used, but simplifies counting in regalloc
+ "SP", // REGSP
+ "SB", // REGSB
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11", // REGCTXT for closures
+ "R12",
+ "R13", // REGTLS
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ "R27",
+ "R28",
+ "R29",
+ "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen".
+ "R31", // REGTMP
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // "CR0",
+ // "CR1",
+ // "CR2",
+ // "CR3",
+ // "CR4",
+ // "CR5",
+ // "CR6",
+ // "CR7",
+
+ // "CR",
+ // "XER",
+ // "LR",
+ // "CTR",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesPPC64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesPPC64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
+ fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26")
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ gr = buildReg("g")
+ // cr = buildReg("CR")
+ // ctr = buildReg("CTR")
+ // lr = buildReg("LR")
+ tmp = buildReg("R31")
+ ctxt = buildReg("R11")
+ callptr = buildReg("R12")
+ // tls = buildReg("R13")
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
+ gp32 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
+ gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
+ gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
+ crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}}
+ gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
+ gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2cr = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
+ fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
+ fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
+ callerSave = regMask(gp | fp | gr)
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ r6 = buildReg("R6")
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
+ {name: "SUBFCconst", argLength: 1, reg: gp11, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (with carry)
+ {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit)
+
+ {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
+
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
+
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2
+
+ {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!)
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width
+ {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width
+ {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width
+ {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width
+ {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width
+
+ {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64
+ {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
+ // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA.
+ // The constant shift values are packed into the aux int32.
+	{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"}, // arg0 extract bits identified by shift params
+ {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, //
+ {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, //
+
+ {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry)
+
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width
+
+ {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits
+ {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
+ {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
+
+ {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+ {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+ {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+
+ {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
+ {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
+
+ {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros
+ {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit)
+
+ {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0
+ {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word
+ {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte
+
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1
+
+ {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit)
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit)
+ {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit)
+ {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit)
+
+ {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit)
+ {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit)
+ {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit)
+ {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 (signed 32-bit)
+ // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1
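+	// (e.g. 7 % 3 = 7 - (7/3)*3 = 7 - 2*3 = 1, a worked instance of the identity above)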
+
+ // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register.
+ {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero
+ {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero
+ {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float
+ {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float
+ {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value
+
+ // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC.
+	// Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the
+	// data and use FMOVDload and FMOVDstore instead (this will also dodge endianness issues).
+ // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use
+ // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr)
+
+ {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register
+ {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
+ {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
+ {name: "ANDCC", argLength: 2, reg: gp2cr, asm: "ANDCC", commutative: true, typ: "Flags"}, // arg0&arg1 sets CC
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
+ {name: "ORCC", argLength: 2, reg: gp2cr, asm: "ORCC", commutative: true, typ: "Flags"}, // arg0|arg1 sets CC
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
+ {name: "XORCC", argLength: 2, reg: gp2cr, asm: "XORCC", commutative: true, typ: "Flags"}, // arg0^arg1 sets CC
+ {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
+ {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
+ {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64
+ {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64
+ {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64
+ {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64
+ {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64
+ {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
+ {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
+
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always.
+ {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}}, asm: "ANDCC", aux: "Int64", typ: "Flags"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
+
+ // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register.
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes
+
+ // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend.
+ // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used.
+ // In these cases the index register field is set to 0 and the full address is in the base register.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes reverse order
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend reverse order
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend reverse order
+
+ // In these cases an index register is used in addition to a base register
+ // Loads from memory location arg[0] + arg[1].
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"},
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"},
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"},
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"},
+
+ // Store bytes in the reverse endian order of the arch into arg0.
+ // These are indexed stores with no offset field in the instruction so the auxint fields are not used.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes reverse order
+ {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes reverse order
+ {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes reverse order
+
+ // Floating point loads from arg0+aux+auxint
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float
+
+ // Store bytes in the endian order of the arch into arg0+aux+auxint
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes
+
+ // Store floating point value into arg0+aux+auxint
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
+
+ // Stores using index and base registers
+ // Stores to arg[0] + arg[1]
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg
+
+ // The following ops store 0 into arg0+aux+auxint arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte
+ {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes
+ {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes
+ {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP
+
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, //
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, //
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, //
+ {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},
+
+ {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"},
+ {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"},
+ {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"},
+ {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
+
+ // ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
+ // ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
+ // ISELB special case where arg0, arg1 values are 0, 1 for boolean result
+ {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
+ {name: "ISELB", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
+
+ // pseudo-ops
+ {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise.
+ {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN
+ {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.
+ {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of the closure pointer.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R3, changed as side effect)
+ // returns mem
+ //
+ // a loop is generated when there is more than one iteration
+ // needed to clear 4 doublewords
+ //
+ // XXLXOR VS32,VS32,VS32
+ // MOVD $len/32,R31
+ // MOVD R31,CTR
+ // MOVD $16,R31
+ // loop:
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS32,(R31)(R3)
+ // ADD R3,32
+ // BC loop
+
+ // remaining doubleword clears generated as needed
+ // MOVD R0,(R3)
+ // MOVD R0,8(R3)
+ // MOVD R0,16(R3)
+ // MOVD R0,24(R3)
+
+ // one or more of these to clear remainder < 8 bytes
+ // MOVW R0,n1(R3)
+ // MOVH R0,n2(R3)
+ // MOVB R0,n3(R3)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp}},
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+
+ // R31 is temp register
+ // Loop code:
+ // MOVD len/32,R31 set up loop ctr
+ // MOVD R31,CTR
+ // MOVD $16,R31 index register
+ // loop:
+ // LXVD2X (R0)(R4),VS32
+ // LXVD2X (R31)(R4),VS33
+ // ADD R4,$32 increment src
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS33,(R31)(R3)
+ // ADD R3,$32 increment dst
+ // BC 16,0,loop branch ctr
+ // For this purpose, VS32 and VS33 are treated as
+ // scratch registers. Since regalloc does not
+ // track vector registers, even if they could be marked
+ // as clobbered it would have no effect.
+ // TODO: If vector registers are managed by regalloc
+ // mark these as clobbered.
+ //
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R4),R14
+ // MOVD R14,n(R3)
+ // MOVW n1(R4),R14
+ // MOVW R14,n1(R3)
+ // MOVH n2(R4),R14
+ // MOVH R14,n2(R3)
+ // MOVB n3(R4),R14
+ // MOVB R14,n3(R3)
+
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ // The following is similar to the LoweredMove, but uses
+ // LXV instead of LXVD2X, which does not require an index
+ // register and will do 4 in a loop instead of only 2.
+ {
+ name: "LoweredQuadMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {
+ name: "LoweredQuadMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+
+ // atomic add32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // return new sum
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // STDCCC Rarg1, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ // return old val
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // LDAR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STDCCC Rarg2, (Rarg0)
+ // BNE -4(PC)
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic 8/32 and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
+ // LBAR/LWAT (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp
+ // BNE Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and its arguments R20 and R21,
+ // but may clobber anything else, including R31 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
+ // then we do (LessThan (InvertFlags (CMP b a))) instead.
+ // Rewrites will convert this to (GreaterThan (CMP b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Constant flag values. For any comparison, there are 3 possible
+ // outcomes: either the three from the signed total order (<,==,>)
+ // or the three from the unsigned total order, depending on which
+ // comparison operation was used (CMP or CMPU -- PPC is different from
+ // the other architectures, which have a single comparison producing
+ // both signed and unsigned comparison results.)
+
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT"}, // signed < or unsigned <
+ {name: "FlagGT"}, // signed > or unsigned >
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "PPC64",
+ pkg: "cmd/internal/obj/ppc64",
+ genfile: "../../ppc64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesPPC64,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["SP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/README b/src/cmd/compile/internal/ssa/gen/README
new file mode 100644
index 0000000..6d2c6bb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/README
@@ -0,0 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+This package generates opcode tables, rewrite rules, etc. for the ssa compiler.
+Run it with Go 1.13 or above:
+ go run *.go
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
new file mode 100644
index 0000000..4380a5e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -0,0 +1,737 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Optimizations TODO:
+// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers
+// * Use the zero register instead of moving 0 into a register.
+// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...).
+// * Optimize left and right shift by simplifying SLTIU, Neg, and ADD for constants.
+// * Arrange for non-trivial Zero and Move lowerings to use aligned loads and stores.
+// * Eliminate zero immediate shifts, adds, etc.
+// * Avoid using Neq32 for writeBarrier.enabled checks.
+
+// Lowering arithmetic
+(Add64 ...) => (ADD ...)
+(AddPtr ...) => (ADD ...)
+(Add32 ...) => (ADD ...)
+(Add16 ...) => (ADD ...)
+(Add8 ...) => (ADD ...)
+(Add32F ...) => (FADDS ...)
+(Add64F ...) => (FADDD ...)
+
+(Sub64 ...) => (SUB ...)
+(SubPtr ...) => (SUB ...)
+(Sub32 ...) => (SUB ...)
+(Sub16 ...) => (SUB ...)
+(Sub8 ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUBD ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul32 ...) => (MULW ...)
+(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMULD ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Div64 x y [false]) => (DIV x y)
+(Div64u ...) => (DIVU ...)
+(Div32 x y [false]) => (DIVW x y)
+(Div32u ...) => (DIVUW ...)
+(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (MULHU ...)
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+
+// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
+(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
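+// For example, x=3, y=5: (3>>1)+(5>>1)+(3&5&1) = 1+2+1 = 4 = (3+5)/2;
+// the final AND term restores the bit lost when both halved values round down.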
+
+(Mod64 x y [false]) => (REM x y)
+(Mod64u ...) => (REMU ...)
+(Mod32 x y [false]) => (REMW x y)
+(Mod32u ...) => (REMUW ...)
+(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And64 ...) => (AND ...)
+(And32 ...) => (AND ...)
+(And16 ...) => (AND ...)
+(And8 ...) => (AND ...)
+
+(Or64 ...) => (OR ...)
+(Or32 ...) => (OR ...)
+(Or16 ...) => (OR ...)
+(Or8 ...) => (OR ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor32 ...) => (XOR ...)
+(Xor16 ...) => (XOR ...)
+(Xor8 ...) => (XOR ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg32 ...) => (NEG ...)
+(Neg16 ...) => (NEG ...)
+(Neg8 ...) => (NEG ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEGD ...)
+
+(Com64 ...) => (NOT ...)
+(Com32 ...) => (NOT ...)
+(Com16 ...) => (NOT ...)
+(Com8 ...) => (NOT ...)
+
+(Sqrt ...) => (FSQRTD ...)
+
+// Sign and zero extension.
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(Cvt32to32F ...) => (FCVTSW ...)
+(Cvt32to64F ...) => (FCVTDW ...)
+(Cvt64to32F ...) => (FCVTSL ...)
+(Cvt64to64F ...) => (FCVTDL ...)
+
+(Cvt32Fto32 ...) => (FCVTWS ...)
+(Cvt32Fto64 ...) => (FCVTLS ...)
+(Cvt64Fto32 ...) => (FCVTWD ...)
+(Cvt64Fto64 ...) => (FCVTLD ...)
+
+(Cvt32Fto64F ...) => (FCVTDS ...)
+(Cvt64Fto32F ...) => (FCVTSD ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+// From genericOps.go:
+// "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0"
+//
+// Like other arches, we compute ~((x-1) >> 63), with arithmetic right shift.
+// For positive x, bit 63 of x-1 is always 0, so the result is -1.
+// For zero x, bit 63 of x-1 is 1, so the result is 0.
+//
+(Slicemask <t> x) => (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
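+// For example, x=1: (1-1)>>63 = 0, and NOT gives all ones (the full mask);
+// x=0: (0-1)>>63 = -1, and NOT gives 0.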
+
+// Truncations
+// We ignore the unused high parts of registers, so truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Shifts
+
+// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0.
+//
+// Breaking down the operation:
+//
+// (SLL x y) generates x << (y & 63).
+//
+// If y < 64, this is the value we want. Otherwise, we want zero.
+//
+// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
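+//
+// For example, shifting by y=3: SLTIU [64] 3 = 1, its negation is all ones,
+// so the AND keeps x<<3. Shifting by y=70: SLTIU [64] 70 = 0, its negation
+// is 0, so the AND forces the result to 0 even though SLL itself would have
+// shifted by 70&63 = 6.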
+(Lsh8x8 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0. See Lsh above for a detailed description.
+(Rsh8Ux8 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux8 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
+// be either 0 or -1 based on the sign bit.
+//
+// We implement this by performing the max shift (-1) if y >= 64.
+//
+// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
+// us with -1 (0xffff...) if y >= 64.
+//
+// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
+// more than the 6 bits SRA cares about.
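+//
+// For example, y=3: SLTIU [64] 3 = 1, ADDI [-1] gives 0, and the OR leaves
+// y=3, so we shift by 3. y=70: SLTIU [64] 70 = 0, ADDI [-1] gives -1, and the
+// OR makes the shift amount all ones, so SRA shifts by 63, yielding 0 or -1
+// from the sign bit.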
+(Rsh8x8 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x8 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+// rotates
+(RotateLeft8 <t> x (MOVBconst [c])) => (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+(RotateLeft16 <t> x (MOVHconst [c])) => (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
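+// For example, rotating an 8-bit value left by c=3 becomes (x<<3) | (x>>5),
+// and the &7 masks keep both shift amounts within the 8-bit width.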
+
+(Less64 ...) => (SLT ...)
+(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (SLTU ...)
+(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less64F ...) => (FLTD ...)
+(Less32F ...) => (FLTS ...)
+
+// Convert x <= y to !(y > x).
+(Leq64 x y) => (Not (Less64 y x))
+(Leq32 x y) => (Not (Less32 y x))
+(Leq16 x y) => (Not (Less16 y x))
+(Leq8 x y) => (Not (Less8 y x))
+(Leq64U x y) => (Not (Less64U y x))
+(Leq32U x y) => (Not (Less32U y x))
+(Leq16U x y) => (Not (Less16U y x))
+(Leq8U x y) => (Not (Less8U y x))
+(Leq64F ...) => (FLED ...)
+(Leq32F ...) => (FLES ...)
+
+(EqPtr x y) => (SEQZ (SUB <x.Type> x y))
+(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
+(Eq32 x y) => (SEQZ (SUBW <x.Type> x y))
+(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq64F ...) => (FEQD ...)
+(Eq32F ...) => (FEQS ...)
+
+(NeqPtr x y) => (SNEZ (SUB <x.Type> x y))
+(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
+(Neq32 x y) => (SNEZ (SUBW <x.Type> x y))
+(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Neq64F ...) => (FNED ...)
+(Neq32F ...) => (FNES ...)
+
+// Loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+
+// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
+// knows what variables are being read/written by the ops.
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBUload [off1+int32(off2)] {sym} base mem)
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBload [off1+int32(off2)] {sym} base mem)
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHUload [off1+int32(off2)] {sym} base mem)
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHload [off1+int32(off2)] {sym} base mem)
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWUload [off1+int32(off2)] {sym} base mem)
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWload [off1+int32(off2)] {sym} base mem)
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDload [off1+int32(off2)] {sym} base mem)
+
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBstore [off1+int32(off2)] {sym} base val mem)
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHstore [off1+int32(off2)] {sym} base val mem)
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWstore [off1+int32(off2)] {sym} base val mem)
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDstore [off1+int32(off2)] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
+// with OffPtr -> ADDI.
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
+
+// Small zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVBconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVHconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVBconst [0])
+ (MOVBstore ptr (MOVBconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVHconst [0])
+ (MOVHstore ptr (MOVHconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVBconst [0])
+ (MOVBstore [2] ptr (MOVBconst [0])
+ (MOVBstore [1] ptr (MOVBconst [0])
+ (MOVBstore ptr (MOVBconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore ptr (MOVDconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore ptr (MOVWconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVHconst [0])
+ (MOVHstore [4] ptr (MOVHconst [0])
+ (MOVHstore [2] ptr (MOVHconst [0])
+ (MOVHstore ptr (MOVHconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVBconst [0])
+ (MOVBstore [1] ptr (MOVBconst [0])
+ (MOVBstore ptr (MOVBconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVHconst [0])
+ (MOVHstore [2] ptr (MOVHconst [0])
+ (MOVHstore ptr (MOVHconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore ptr (MOVWconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] ptr (MOVDconst [0])
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+
+// Medium 8-aligned zeroing uses a Duff's device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
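+// Each unrolled duffzero element on riscv64 presumably clears 8 bytes with 8
+// bytes of code, so the auxInt above is the code offset that leaves exactly
+// s/8 iterations to run (see runtime/mkduff.go).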
+
+// Generic zeroing uses a loop
+(Zero [s] {t} ptr mem) =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
+ mem)
+
+(Convert ...) => (MOVconvert ...)
+
+// Checks
+(IsNonNil p) => (NeqPtr (MOVDconst [0]) p)
+(IsInBounds ...) => (Less64U ...)
+(IsSliceInBounds ...) => (Leq64U ...)
+
+// Trivial lowering
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Small moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] dst (MOVDload [24] src mem)
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))))
+
+// Medium 8-aligned move uses a Duff's device
+// 16 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+
+// Generic move uses a loop
+(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
+ mem)
+
+// Boolean ops; 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (SEQZ (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not ...) => (SEQZ ...)
+
+// Lowering pointer arithmetic
+// TODO: Special handling for SP offsets, like ARM
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+// TODO(jsing): Check if we actually need MOV{B,H,W}const as most platforms
+// use a single MOVDconst op.
+(Const8 ...) => (MOVBconst ...)
+(Const16 ...) => (MOVHconst ...)
+(Const32 ...) => (MOVWconst ...)
+(Const64 ...) => (MOVDconst ...)
+(Const32F [val]) => (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
+(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+(ConstNil) => (MOVDconst [0])
+(ConstBool [val]) => (MOVBconst [int8(b2i(val))])
+
+// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift.
+// The lower 32 bit immediate will be treated as signed,
+// so if it is negative, adjust for the borrow by incrementing the top half.
+// We don't have to worry about overflow from the increment,
+// because if the top half is all 1s, and int32(c) is negative,
+// then the overall constant fits in an int32.
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 => (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
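+// For example, c = 0x180000000: int32(c) = -0x80000000 < 0, so c>>32+1 = 2 and
+// the result is (2<<32) + (-0x80000000) = 0x180000000. For c = 0x100000005,
+// int32(c) = 5 >= 0, so the result is (1<<32) + 5.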
+
+(Addr {sym} base) => (MOVaddr {sym} [0] base)
+(LocalAddr {sym} base _) => (MOVaddr {sym} base)
+
+// Calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// Atomic Intrinsics
+(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
+(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
+(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
+(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
+(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
+(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
+
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
+(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
+
+// Conditional branches
+(If cond yes no) => (BNEZ cond yes no)
+
+// Optimizations
+
+// Absorb SEQZ/SNEZ into branch.
+(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
+(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
+
+// Convert BEQZ/BNEZ into more optimal branch conditions.
+(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
+(BNEZ (SUB x y) yes no) => (BNE x y yes no)
+(BEQZ (SLT x y) yes no) => (BGE x y yes no)
+(BNEZ (SLT x y) yes no) => (BLT x y yes no)
+(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
+(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
+
+// Convert branch with zero to BEQZ/BNEZ.
+(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
+(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
+(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
+(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
+
+// Store zero
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+
+// Avoid sign/zero extension for consts.
+(MOVBreg (MOVBconst [c])) => (MOVDconst [int64(c)])
+(MOVHreg (MOVBconst [c])) => (MOVDconst [int64(c)])
+(MOVHreg (MOVHconst [c])) => (MOVDconst [int64(c)])
+(MOVWreg (MOVBconst [c])) => (MOVDconst [int64(c)])
+(MOVWreg (MOVHconst [c])) => (MOVDconst [int64(c)])
+(MOVWreg (MOVWconst [c])) => (MOVDconst [int64(c)])
+(MOVBUreg (MOVBconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHUreg (MOVBconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVHUreg (MOVHconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWUreg (MOVBconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVWUreg (MOVHconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVWUreg (MOVWconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Avoid sign/zero extension after properly typed load.
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+
+// Fold double extensions.
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// Do not extend before store.
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// Replace extend after load with alternate load where possible.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVDnop does not emit an instruction; it exists only to ensure the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// Fold constant into immediate instructions where possible.
+(ADD (MOVBconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVHconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVWconst [val]) x) => (ADDI [int64(val)] x)
+(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
+
+(AND (MOVBconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVHconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVWconst [val]) x) => (ANDI [int64(val)] x)
+(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
+
+(OR (MOVBconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVHconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVWconst [val]) x) => (ORI [int64(val)] x)
+(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
+
+(XOR (MOVBconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVHconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVWconst [val]) x) => (XORI [int64(val)] x)
+(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
+
+(SLL x (MOVBconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVHconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVWconst [val])) => (SLLI [int64(val&63)] x)
+(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
+
+(SRL x (MOVBconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVHconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVWconst [val])) => (SRLI [int64(val&63)] x)
+(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+
+(SRA x (MOVBconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVHconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVWconst [val])) => (SRAI [int64(val&63)] x)
+(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
+
+// Convert subtraction of a const into ADDI with negative immediate, where possible.
+(SUB x (MOVBconst [val])) => (ADDI [-int64(val)] x)
+(SUB x (MOVHconst [val])) => (ADDI [-int64(val)] x)
+(SUB x (MOVWconst [val])) && is32Bit(-int64(val)) => (ADDI [-int64(val)] x)
+(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
+
+// Subtraction of zero.
+(SUB x (MOVBconst [0])) => x
+(SUB x (MOVHconst [0])) => x
+(SUB x (MOVWconst [0])) => x
+(SUB x (MOVDconst [0])) => x
+
+// Subtraction of zero with sign extension.
+(SUBW x (MOVWconst [0])) => (ADDIW [0] x)
+
+// Subtraction from zero.
+(SUB (MOVBconst [0]) x) => (NEG x)
+(SUB (MOVHconst [0]) x) => (NEG x)
+(SUB (MOVWconst [0]) x) => (NEG x)
+(SUB (MOVDconst [0]) x) => (NEG x)
+
+// Subtraction from zero with sign extension.
+(SUBW (MOVDconst [0]) x) => (NEGW x)
+
+// Addition of zero.
+(ADDI [0] x) => x
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
new file mode 100644
index 0000000..f643192
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -0,0 +1,464 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "fmt"
+)
+
+// Notes:
+// - Boolean types occupy the entire register. 0=false, 1=true.
+
+// Suffixes encode the bit width of various instructions:
+//
+// D (double word) = 64 bit int
+// W (word) = 32 bit int
+// H (half word) = 16 bit int
+// B (byte) = 8 bit int
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+// L = 64 bit int, used when the opcode starts with F
+
+const (
+ riscv64REG_G = 27
+ riscv64REG_CTXT = 20
+ riscv64REG_LR = 1
+ riscv64REG_SP = 2
+ riscv64REG_TP = 4
+ riscv64REG_TMP = 31
+ riscv64REG_ZERO = 0
+)
+
+func riscv64RegName(r int) string {
+ switch {
+ case r == riscv64REG_G:
+ return "g"
+ case r == riscv64REG_SP:
+ return "SP"
+ case 0 <= r && r <= 31:
+ return fmt.Sprintf("X%d", r)
+ case 32 <= r && r <= 63:
+ return fmt.Sprintf("F%d", r-32)
+ default:
+ panic(fmt.Sprintf("unknown register %d", r))
+ }
+}
+
+func init() {
+ var regNamesRISCV64 []string
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ regNamed := make(map[string]regMask)
+
+ // Build the list of register names, creating an appropriately indexed
+ // regMask for the gp and fp registers as we go.
+ //
+ // If name is specified, use it rather than the riscv reg number.
+ addreg := func(r int, name string) regMask {
+ mask := regMask(1) << uint(len(regNamesRISCV64))
+ if name == "" {
+ name = riscv64RegName(r)
+ }
+ regNamesRISCV64 = append(regNamesRISCV64, name)
+ regNamed[name] = mask
+ return mask
+ }
+
+ // General purpose registers.
+ for r := 0; r <= 31; r++ {
+ if r == riscv64REG_LR {
+ // LR is not used by regalloc, so we skip it to leave
+ // room for pseudo-register SB.
+ continue
+ }
+
+ mask := addreg(r, "")
+
+ // Add general purpose registers to gpMask.
+ switch r {
+ // ZERO, TP and TMP are not in any gp mask.
+ case riscv64REG_ZERO, riscv64REG_TP, riscv64REG_TMP:
+ case riscv64REG_G:
+ gpgMask |= mask
+ gpspsbgMask |= mask
+ case riscv64REG_SP:
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ default:
+ gpMask |= mask
+ gpgMask |= mask
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ }
+ }
+
+ // Floating point registers.
+ for r := 32; r <= 63; r++ {
+ mask := addreg(r, "")
+ fpMask |= mask
+ }
+
+ // Pseudo-register: SB
+ mask := addreg(-1, "SB")
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+
+ if len(regNamesRISCV64) > 64 {
+ // regMask is only 64 bits.
+ panic("Too many RISCV64 registers")
+ }
+
+ regCtxt := regNamed["X20"]
+ callerSave := gpMask | fpMask | regNamed["g"]
+
+ var (
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+ gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}}
+ gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
+
+ fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
+ fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
+ gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}}
+ fpgp = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}}
+ fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}}
+ fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
+ fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}
+
+ call = regInfo{clobbers: callerSave}
+ callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
+ callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
+ )
+
+ RISCV64ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint
+ {name: "ADDIW", argLength: 1, reg: gp11, asm: "ADDIW", aux: "Int64"}, // 32 low bits of arg0 + auxint, sign extended to 64 bits
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW"}, // -arg0 of 32 bits, sign extended to 64 bits
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"}, // 32 low bits of arg 0 - 32 low bits of arg 1, sign extended to 64 bits
+
+ // M extension. H means high (i.e., it returns the top bits of
+ // the result). U means unsigned. W means word (i.e., 32-bit).
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"},
+ {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"},
+ {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"},
+ {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1
+ {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"},
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},
+ {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"},
+ {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1
+ {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"},
+ {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"},
+ {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"},
+
+ {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+
+ {name: "MOVBconst", reg: gp01, asm: "MOV", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint
+ {name: "MOVHconst", reg: gp01, asm: "MOV", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint
+ {name: "MOVWconst", reg: gp01, asm: "MOV", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ // Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits
+ {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend
+ {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend
+ {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend
+
+ // Stores: store <size> lowest bits in arg1 to arg0+auxint+aux; arg2=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // Shift ops
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned
+ {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
+ {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
+ {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+
+ // Bitwise ops
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint
+ {name: "NOT", argLength: 1, reg: gp11, asm: "NOT"}, // ^arg0
+
+ // Generate boolean values
+ {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1
+ {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1
+ {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1
+ {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1
+ {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1
+ {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
+
+ // MOVconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
+
+ // Calls
+ {name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero (in X10, changed as side effect)
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in X11, changed as side effect)
+ // arg1 = address of src memory (in X10, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X11"], regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"] | regNamed["X11"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Generic moves and zeros
+
+ // general unaligned zeroing
+ // arg0 = address of memory to zero (in X5, changed as side effect)
+ // arg1 = address of the last element to zero (inclusive)
+ // arg2 = mem
+ // auxint = element size
+ // returns mem
+ // mov ZERO, (X5)
+ // ADD $sz, X5
+ // BGEU Rarg1, X5, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], gpMask},
+ clobbers: regNamed["X5"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // general unaligned move
+ // arg0 = address of dst memory (in X5, changed as side effect)
+ // arg1 = address of src memory (in X6, changed as side effect)
+ // arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
+ // arg3 = mem
+ // auxint = alignment
+ // clobbers X7 as a tmp register.
+ // returns mem
+ // mov (X6), X7
+ // mov X7, (X5)
+ // ADD $sz, X5
+ // ADD $sz, X6
+ // BGEU Rarg2, X5, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
+ clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // Atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic exchange.
+ // store arg1 to *arg0. arg2=mem. returns <old content of *arg0, memory>.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV $0, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Lowering pass-throughs
+ {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers RA (LR) because it's a call
+ // and T6 (REG_TMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}, clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"]}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // F extension.
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
+ {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
+ {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0)
+ {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux
+ {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux
+ {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == arg1
+ {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1
+ {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1
+ {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1
+
+ // D extension.
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0)
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
+ {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"}, // int64(arg0)
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0)
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float6 to arg0+auxint+aux
+ {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1
+ {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1
+ {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1
+ {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
+ }
+
+ RISCV64blocks := []blockData{
+ {name: "BEQ", controls: 2},
+ {name: "BNE", controls: 2},
+ {name: "BLT", controls: 2},
+ {name: "BGE", controls: 2},
+ {name: "BLTU", controls: 2},
+ {name: "BGEU", controls: 2},
+
+ {name: "BEQZ", controls: 1},
+ {name: "BNEZ", controls: 1},
+ {name: "BLEZ", controls: 1},
+ {name: "BGEZ", controls: 1},
+ {name: "BLTZ", controls: 1},
+ {name: "BGTZ", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "RISCV64",
+ pkg: "cmd/internal/obj/riscv",
+ genfile: "../../riscv64/ssa.go",
+ ops: RISCV64ops,
+ blocks: RISCV64blocks,
+ regnames: regNamesRISCV64,
+ gpregmask: gpMask,
+ fpregmask: fpMask,
+ framepointerreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
new file mode 100644
index 0000000..384f2e8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -0,0 +1,1695 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|Ptr) ...) => (ADD ...)
+(Add(32|16|8) ...) => (ADDW ...)
+(Add32F x y) => (Select0 (FADDS x y))
+(Add64F x y) => (Select0 (FADD x y))
+
+(Sub(64|Ptr) ...) => (SUB ...)
+(Sub(32|16|8) ...) => (SUBW ...)
+(Sub32F x y) => (Select0 (FSUBS x y))
+(Sub64F x y) => (Select0 (FSUB x y))
+
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+(Mul64uhilo ...) => (MLGR ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+(Div64 x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Div32 x y) => (DIVW (MOVWreg x) y)
+(Div32u x y) => (DIVWU (MOVWZreg x) y)
+(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y))
+(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
+(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y))
+(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))
+
+(Hmul(64|64u) ...) => (MULH(D|DU) ...)
+(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+
+(Mod64 x y) => (MODD x y)
+(Mod64u ...) => (MODDU ...)
+// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Mod32 x y) => (MODW (MOVWreg x) y)
+(Mod32u x y) => (MODWU (MOVWZreg x) y)
+(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y))
+(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
+(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y))
+(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+
+(And64 ...) => (AND ...)
+(And(32|16|8) ...) => (ANDW ...)
+
+(Or64 ...) => (OR ...)
+(Or(32|16|8) ...) => (ORW ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor(32|16|8) ...) => (XORW ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg(32|16|8) ...) => (NEGW ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEG ...)
+
+(Com64 ...) => (NOT ...)
+(Com(32|16|8) ...) => (NOTW ...)
+(NOT x) => (XOR (MOVDconst [-1]) x)
+(NOTW x) => (XORWconst [-1] x)
+
+// Lowering boolean ops
+(AndB ...) => (ANDW ...)
+(OrB ...) => (ORW ...)
+(Not x) => (XORWconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+// TODO: optimize these cases?
+(Ctz64NonZero ...) => (Ctz64 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
+(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
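A quick Go cross-check of the identity above, with bits.LeadingZeros64 standing in for FLOGR (both yield 64 for a zero input); the helper name is made up and this is not compiler code:

package ctzsketch

import "math/bits"

// ctzViaFLOGR computes the count of trailing zeros using the identity from
// the rules above: ctz(x) = 64 - findLeftmostOne((x-1) &^ x).
func ctzViaFLOGR(x uint64) int {
	return 64 - bits.LeadingZeros64((x-1)&^x)
}

// ctzViaFLOGR(8) == 3, ctzViaFLOGR(1) == 0 and ctzViaFLOGR(0) == 64,
// matching bits.TrailingZeros64.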
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
+
+// POPCNT treats the input register as a vector of 8 bytes, producing
+// a population count for each individual byte. For inputs larger than
+// a single byte we therefore need to sum the individual bytes produced
+// by the POPCNT instruction. For example, the following instruction
+// sequence could be used to calculate the population count of a 4-byte
+// value:
+//
+// MOVD $0x12345678, R1 // R1=0x12345678 <-- input
+// POPCNT R1, R2 // R2=0x02030404
+// SRW $16, R2, R3 // R3=0x00000203
+// ADDW R2, R3, R4 // R4=0x02030607
+// SRW $8, R4, R5 // R5=0x00020306
+// ADDW R4, R5, R6 // R6=0x0205090d
+// MOVBZ R6, R7 // R7=0x0000000d <-- result is 13
+//
+(PopCount8 x) => (POPCNT (MOVBZreg x))
+(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+
+// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
+// 2, 4 or 8 bytes respectively. The result is a single byte; however, the
+// other bytes might contain junk, so a zero extension is required if
+// the desired output type is larger than 1 byte.
+(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
+(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
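As a cross-check of the byte-summing scheme in the comment above, here is a small Go sketch (the name is made up; not compiler code) that mimics per-byte POPCNT followed by the SumBytes4/SumBytes2 folds:

package popcountsketch

import "math/bits"

// popCount32ViaBytes builds the per-byte population counts that POPCNT would
// produce, then folds them into the low byte as SumBytes4/SumBytes2 do.
func popCount32ViaBytes(x uint32) uint8 {
	var perByte uint32
	for i := 0; i < 4; i++ {
		b := uint8(x >> (8 * uint(i)))
		perByte |= uint32(bits.OnesCount8(b)) << (8 * uint(i))
	}
	s := perByte + perByte>>16 // SumBytes4 step
	s += s >> 8                // SumBytes2 step
	return uint8(s)            // MOVBZreg: only the low byte is meaningful
}

// popCount32ViaBytes(0x12345678) == 13, matching the worked example above.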
+
+(Bswap64 ...) => (MOVDBR ...)
+(Bswap32 ...) => (MOVWBR ...)
+
+// add with carry
+(Select0 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+(Select1 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
+
+// subtract with borrow
+(Select0 (Sub64borrow x y c))
+ => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+(Select1 (Sub64borrow x y c))
+ => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
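For reference, the generic Add64carry and Sub64borrow ops being lowered above have the same semantics as math/bits; a minimal Go sketch (function names are illustrative):

package carrysketch

import "math/bits"

// add64Carry mirrors Select0/Select1 of Add64carry: the 64-bit sum and the
// carry-out bit of x + y + carryIn, where carryIn is 0 or 1.
func add64Carry(x, y, carryIn uint64) (sum, carryOut uint64) {
	return bits.Add64(x, y, carryIn)
}

// sub64Borrow mirrors Select0/Select1 of Sub64borrow: the 64-bit difference
// and the borrow-out bit of x - y - borrowIn, where borrowIn is 0 or 1.
func sub64Borrow(x, y, borrowIn uint64) (diff, borrowOut uint64) {
	return bits.Sub64(x, y, borrowIn)
}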
+
+// math package intrinsics
+(Sqrt ...) => (FSQRT ...)
+(Floor x) => (FIDBR [7] x)
+(Ceil x) => (FIDBR [6] x)
+(Trunc x) => (FIDBR [5] x)
+(RoundToEven x) => (FIDBR [4] x)
+(Round x) => (FIDBR [1] x)
+(FMA x y z) => (FMADD z x y)
+
+// Atomic loads and stores.
+// The SYNC instruction (fast-BCR-serialization) prevents store-load
+// reordering. Other sequences of memory operations (load-load,
+// store-store and load-store) are already guaranteed not to be reordered.
+(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
+(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))
+
+// Store-release doesn't require store-load ordering.
+(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
+(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)
+
+// Atomic and: *(*uint8)(ptr) &= val
+//
+// Round pointer down to nearest word boundary and pad value with ones before
+// applying atomic AND operation to target word.
+//
+// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)))
+//
+(AtomicAnd8 ptr val mem)
+ => (LANfloor
+ ptr
+ (RLL <typ.UInt32>
+ (ORWconst <typ.UInt32> val [-1<<8])
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
+
+// Atomic or: *(*uint8)(ptr) |= val
+//
+// Round pointer down to nearest word boundary and pad value with zeros before
+// applying atomic OR operation to target word.
+//
+// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
+//
+(AtomicOr8 ptr val mem)
+ => (LAOfloor
+ ptr
+ (SLW <typ.UInt32>
+ (MOVBZreg <typ.UInt32> val)
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
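To make the word-level arithmetic above concrete, here is a small Go sketch (helper names are made up) of the 32-bit values that the AND and OR lowerings apply to the containing word; addrLow2 stands for ptr & 3, and byte offsets follow the big-endian s390x layout:

package atomic8sketch

import "math/bits"

// and8Mask pads val with ones and rotates it into the target byte, so the
// atomic AND leaves the other three bytes of the word unchanged.
func and8Mask(addrLow2 uint32, val uint8) uint32 {
	shift := (3 << 3) ^ ((addrLow2 & 3) << 3)
	return bits.RotateLeft32(uint32(val)|0xffffff00, int(shift))
}

// or8Value pads val with zeros (a plain shift into place), so the atomic OR
// leaves the other three bytes of the word unchanged.
func or8Value(addrLow2 uint32, val uint8) uint32 {
	shift := (3 << 3) ^ ((addrLow2 & 3) << 3)
	return uint32(val) << shift
}

// and8Mask(1, 0x0f) == 0xff0fffff and or8Value(1, 0xf0) == 0x00f00000:
// only the byte at offset 1 within the word is affected.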
+
+(AtomicAnd32 ...) => (LAN ...)
+(AtomicOr32 ...) => (LAO ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc(16|32|64)to8 ...) => (Copy ...)
+(Trunc(32|64)to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CEFBRA ...)
+(Cvt32to64F ...) => (CDFBRA ...)
+(Cvt64to32F ...) => (CEGBRA ...)
+(Cvt64to64F ...) => (CDGBRA ...)
+
+(Cvt32Fto32 ...) => (CFEBRA ...)
+(Cvt32Fto64 ...) => (CGEBRA ...)
+(Cvt64Fto32 ...) => (CFDBRA ...)
+(Cvt64Fto64 ...) => (CGDBRA ...)
+
+// Lowering float <-> uint
+(Cvt32Uto32F ...) => (CELFBR ...)
+(Cvt32Uto64F ...) => (CDLFBR ...)
+(Cvt64Uto32F ...) => (CELGBR ...)
+(Cvt64Uto64F ...) => (CDLGBR ...)
+
+(Cvt32Fto32U ...) => (CLFEBR ...)
+(Cvt32Fto64U ...) => (CLGEBR ...)
+(Cvt64Fto32U ...) => (CLFDBR ...)
+(Cvt64Fto64U ...) => (CLGDBR ...)
+
+// Lowering float32 <-> float64
+(Cvt32Fto64F ...) => (LDEBR ...)
+(Cvt64Fto32F ...) => (LEDBR ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+// Lowering shifts
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
+
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = shift >= 64 ? 0 : arg << shift
+(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Lsh(64|32|16|8)x8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(64|32)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(16|8)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63.
+// result = arg >> (shift >= 64 ? 63 : shift)
+(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+
+(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
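The LOCGR selections above are branch-free encodings of ordinary Go shift semantics; a plain Go sketch of the 64-bit behaviour being implemented (unsigned shifts saturate to 0, signed right shifts clamp the amount to 63):

package shiftsketch

// lsh64 is the behaviour Lsh64xN lowers to: the result is 0 once the shift
// amount reaches the width of the value.
func lsh64(x uint64, s uint) uint64 {
	if s >= 64 {
		return 0
	}
	return x << s
}

// rsh64 is the behaviour Rsh64xN lowers to: amounts of 64 or more are clamped
// to 63, so the result is 0 or -1 depending on the sign bit.
func rsh64(x int64, s uint) int64 {
	if s >= 64 {
		s = 63
	}
	return x >> s
}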
+
+// Lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 ...) => (RLL ...)
+(RotateLeft64 ...) => (RLLG ...)
+
+// Lowering comparisons
+(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of the Store pattern should come first.
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+
+// Load and store for small copies.
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHZload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
+(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff32(int32(s), 0)] dst src mem)
+(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff32(int32(s)-256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))
+(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff32(int32(s)-512, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)))
+(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff32(int32(s)-768, 768)] dst src (MVC [makeValAndOff32(256, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))))
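A small Go sketch (the function name is made up) of how the four MVC rules above carve a copy of s bytes, 0 < s <= 1024, into at most four chunks of up to 256 bytes at increasing offsets:

package mvcsketch

// mvcChunks returns (length, offset) pairs covering a copy of s bytes,
// mirroring the nested MVC rewrites above (lowest offset is the innermost,
// i.e. first-executed, MVC).
func mvcChunks(s int) [][2]int {
	var chunks [][2]int
	off := 0
	for s > 256 {
		chunks = append(chunks, [2]int{256, off})
		off += 256
		s -= 256
	}
	chunks = append(chunks, [2]int{s, off})
	return chunks
}

// mvcChunks(700) == [[256 0] [256 256] [188 512]], matching the
// s > 512 && s <= 768 rule.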
+
+// Move more than 1024 bytes using a loop.
+(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
+ (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,2)] destptr
+ (MOVHstoreconst [0] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff32(0,3)] destptr
+ (MOVWstoreconst [0] destptr mem))
+
+(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
+ (CLEAR [makeValAndOff32(int32(s), 0)] destptr mem)
+
+// Zero more than 1024 bytes using a loop.
+(Zero [s] destptr mem) && s > 1024 =>
+ (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [b]) => (MOVDconst [b2i(b)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+
+// Miscellaneous
+(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+(ITab (Load ptr mem)) => (MOVDload ptr mem)
+
+// block rewrites
+(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Notes on removing unnecessary sign/zero extensions.
+//
+// After a value is spilled it is restored using a sign- or zero-extension
+// to register-width as appropriate for its type. For example, a uint8 will
+// be restored using a MOVBZ (llgc) instruction which will zero extend the
+// 8-bit value to 64-bits.
+//
+// This is a hazard when folding sign- and zero-extensions since we need to
+// ensure not only that the value in the argument register is correctly
+// extended but also that it will still be correctly extended if it is
+// spilled and restored.
+//
+// In general this means we need type checks when the RHS of a rule is an
+// OpCopy (i.e. "(... x:(...) ...) -> x").
+
+// Merge double extensions.
+(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+
+// Bypass redundant sign extensions.
+(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Bypass redundant zero extensions.
+(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Remove zero extensions after zero extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x
+
+// Remove sign extensions after sign extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+
+// Remove sign extensions after zero extending load.
+// These type checks are probably unnecessary but do them anyway just in case.
+(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+
+// Fold sign and zero extensions into loads.
+//
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+//
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
+(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)
+
+// Remove zero extensions after argument load.
+(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
+(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
+(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x
+
+// Remove sign extensions after argument load.
+(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
+(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
+(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x
+
+// Fold zero extensions into constants.
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Fold sign extensions into constants.
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+
+// Remove zero extension of conditional move.
+// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
+(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ && int64(uint8(c)) == c
+ && int64(uint8(d)) == d
+ && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ => x
+
+// Fold boolean tests into blocks.
+// Note: this must match If statement lowering.
+(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ && int32(x) != 0
+ => (BRC {d} cmp yes no)
+
+// Canonicalize BRC condition code mask by removing impossible conditions.
+// Integer comparisons cannot generate the unordered condition.
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+
+// Compare-and-branch.
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no)
+
+// Compare-and-branch (immediate).
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+
+// Absorb immediate into compare-and-branch.
+(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no)
+(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
+(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no)
+(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)
+
+// Prefer comparison with immediate to compare-and-branch.
+(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no)
+(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no)
+(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no)
+(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+
+// Absorb sign/zero extensions into 32-bit compare-and-branch.
+(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no)
+(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)
+
+// Bring out-of-range signed immediates into range by varying branch condition.
+(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no)
+
+// Bring out-of-range unsigned immediates into range by varying branch condition.
+(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
+(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no)
+
+// Bring out-of-range immediates into range by switching signedness (only == and !=).
+(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)
+
+// Fold constants into instructions.
+(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x)
+(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
+
+(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
+(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
+(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))
+
+(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
+(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)
+
+// NILF instructions leave the high 32 bits unchanged which is
+// equivalent to the leftmost 32 bits being set.
+// TODO(mundaym): modify the assembler to accept 64-bit values
+// and use isU32Bit(^c).
+(AND x (MOVDconst [c]))
+ && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c < 0
+ => (ANDconst [c] x)
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c >= 0
+ => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
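+
+// Illustrative aside (not part of the rule set): the c >= 0 case above relies on
+// a non-negative 32-bit mask having zero high bits, so a 64-bit AND is the same
+// as a 32-bit AND followed by zero extension. A minimal Go sketch of that
+// assumption (names here are purely illustrative):
+//
+//	package main
+//
+//	import "fmt"
+//
+//	func main() {
+//		x := uint64(0xfedcba9876543210)
+//		c := int64(0x7fffabcd)               // is32Bit(c) && c >= 0
+//		lhs := x & uint64(c)                 // 64-bit AND
+//		rhs := uint64(uint32(x) & uint32(c)) // ANDW result, zero extended (MOVWZreg)
+//		fmt.Println(lhs == rhs)              // true
+//	}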
+
+(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)
+
+((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)
+
+((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
+((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
+
+// Constant shifts.
+(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
+(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
+(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
+(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
+
+// Shifts only use the rightmost 6 bits of the shift value.
+(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
+ && r.Amount == 0
+ && r.OutMask()&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
+ => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
+(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
+(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
+(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y)
+(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
+(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
+
+// Match rotate by constant.
+(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])
+
+// Match rotate by constant pattern.
+((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c])
+
+// Signed 64-bit comparison with immediate.
+(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
+(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))
+
+// Unsigned 64-bit comparison with immediate.
+(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
+(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))
+
+// Signed and unsigned 32-bit comparison with immediate.
+(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
+(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
+
+// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
+(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+
+// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
+(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+
+// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
+(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
+(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
+(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
+(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
+(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
+(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})
+
+// Absorb shift into 'rotate then insert selected bits [into zero]'.
+//
+// Any unsigned shift can be represented as a rotate and mask operation:
+//
+// x << c => RotateLeft64(x, c) & (^uint64(0) << c)
+// x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
+//
+// Therefore when a shift is used as the input to a rotate then insert
+// selected bits instruction we can merge the two together. We just have
+// to be careful that the resultant mask is representable (non-zero and
+// contiguous). For example, assuming that x is variable and c, y and m
+// are constants, a shift followed by a rotate then insert selected bits
+// could be represented as:
+//
+// RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
+//
+// We can split the rotation by y into two, one rotate for x and one for
+// the mask:
+//
+// RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
+//
+// The rotations of x by c followed by y can then be combined:
+//
+// RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
+// ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// rotate mask
+//
+// To perform this optimization we therefore just need to check that it
+// is valid to merge the shift mask (^uint64(0)<<c) into the selected
+// bits mask (i.e. that the resultant mask is non-zero and contiguous).
+//
+(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
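+
+// Illustrative aside (not part of the rule set): the identities above can be
+// checked outside the compiler with math/bits; this sketch uses the standard
+// library rather than the compiler's own helpers:
+//
+//	package main
+//
+//	import "math/bits"
+//
+//	func main() {
+//		x := uint64(0xfedcba9876543210)
+//		for c := 0; c < 64; c++ {
+//			m := ^uint64(0) << c // shift mask for x << c
+//			if x<<c != bits.RotateLeft64(x, c)&m {
+//				panic("shift-as-rotate-and-mask identity failed")
+//			}
+//			// Rotation is a bit permutation, so it distributes over AND and the
+//			// mask can be rotated separately from x.
+//			y := 13
+//			if bits.RotateLeft64(bits.RotateLeft64(x, c)&m, y) !=
+//				bits.RotateLeft64(x, c+y)&bits.RotateLeft64(m, y) {
+//				panic("split-rotation identity failed")
+//			}
+//		}
+//	}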
+
+// Absorb 'rotate then insert selected bits [into zero]' into left shift.
+(SLDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into right shift.
+(SRDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Merge 'rotate then insert selected bits [into zero]' instructions together.
+(RISBGZ (RISBGZ x {y}) {z})
+ && z.InMerge(y.OutMask()) != nil
+ => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+
+// Convert RISBGZ into 64-bit shift (helps CSE).
+(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
+(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])
+
+// Optimize single bit isolation when it is known to be equivalent to
+// the most significant bit due to mask produced by arithmetic shift.
+// Simply isolate the most significant bit itself and place it in the
+// correct position.
+//
+// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
+(RISBGZ (SRADconst x [c]) {r})
+ && r.Start == r.End // single bit selected
+ && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
+ => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
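+
+// Illustrative aside (not part of the rule set): for the example above, selecting
+// a bit of the sign mask is the same as isolating the sign bit itself and shifting
+// it into place:
+//
+//	package main
+//
+//	import "fmt"
+//
+//	func main() {
+//		for _, x := range []int64{-5, 5} {
+//			fmt.Println((x>>63)&0x8 == int64(uint64(x)>>63)<<3) // true, true
+//		}
+//	}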
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// Use zero extension instead of RISBGZ.
+(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)
+
+// Use zero extension instead of ANDW.
+(ANDWconst [0x00ff] x) => (MOVBZreg x)
+(ANDWconst [0xffff] x) => (MOVHZreg x)
+
+// Strength reduce multiplication to the sum (or difference) of two powers of two.
+//
+// Examples:
+// 5x -> 4x + 1x
+// 10x -> 8x + 2x
+// 120x -> 128x - 8x
+// -120x -> 8x - 128x
+//
+// We know that the rightmost bit of any positive value, once isolated, must either
+// be a power of 2 (because it is a single bit) or 0 (if the original value is 0).
+// In all of these rules we use a rightmost bit calculation to determine one operand
+// for the addition or subtraction. We then just need to calculate if the other
+// operand is a valid power of 2 before we can match the rule.
+//
+// Notes:
+// - the generic rules have already matched single powers of two so we ignore them here
+// - isPowerOfTwo32 asserts that its argument is greater than 0
+// - c&(c-1) = clear rightmost bit
+// - c&^(c-1) = isolate rightmost bit
+
+// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c&(c-1))
+ => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c+(c&^(c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
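+
+// Illustrative aside (not part of the rule set): the decomposition can be checked
+// directly in Go; bits.TrailingZeros32 stands in for the compiler's log32 helper:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"math/bits"
+//	)
+//
+//	func main() {
+//		x, c := int32(7), int32(10)  // 10x -> 8x + 2x
+//		lo := c &^ (c - 1)           // isolate rightmost bit: 2
+//		hi := c & (c - 1)            // clear rightmost bit:   8
+//		sum := x<<bits.TrailingZeros32(uint32(hi)) + x<<bits.TrailingZeros32(uint32(lo))
+//		fmt.Println(x*c == sum)      // true
+//	}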
+
+// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
+(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)
+
+// Fold ADDconst into MOVDaddridx.
+(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+
+// reverse ordering of compare instruction
+(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)
+
+// replace load from same location as preceding store with copy
+(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
+(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
+(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
+(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
+(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
+(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
+(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+
+// prefer FPR <-> GPR moves over combined load ops
+(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
+(ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR <t> y))
+(SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR <t> y))
+(ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR <t> y))
+(ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR <t> y))
+(XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR <t> y))
+
+// detect attempts to set/clear the sign bit
+// may need to be reworked when NIHH/OIHH are added
+(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
+(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
+(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
+(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
+
+// detect attempts to set the sign bit with load
+(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+
+// detect copysign
+(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <t> y x))
+(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ && c >= 0
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
+(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
+
+// absorb negations into set/clear sign bit
+(FNEG (LPDFR x)) => (LNDFR x)
+(FNEG (LNDFR x)) => (LPDFR x)
+(FNEGS (LPDFR x)) => (LNDFR x)
+(FNEGS (LNDFR x)) => (LPDFR x)
+
+// no need to convert float32 to float64 to set/clear sign bit
+(LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
+(LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
+
+// remove unnecessary FPR <-> GPR moves
+(LDGR (LGDR x)) => x
+(LGDR (LDGR x)) => x
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// Fold constants into memory operations.
+// Note that this is not always a good idea because if not all the uses of
+// the ADDconst get eliminated, we still have to compute the ADDconst and we now
+// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
+
+(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem)
+(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem)
+(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
+(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
+(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem)
+(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem)
+
+(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem)
+(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem)
+(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem)
+(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem)
+(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem)
+(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem)
+
+// Fold constants into stores.
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off()+int64(off)) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off()+int64(off)) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+
+// Merge address calculations into loads and stores.
+// Offsets from SB must not be merged into unaligned memory accesses because
+// loads/stores using PC-relative addressing directly must be aligned to the
+// size of the target.
+(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
+(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+// MOVDaddr into MOVDaddridx
+(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// Absorb InvertFlags into branches.
+(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)
+
+// Constant comparisons.
+(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT)
+(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)
+
+(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+
+(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
+(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)
+
+(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
+(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)
+
+(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
+(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)
+
+// Constant compare-and-branch with immediate.
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes)
+
+// Constant compare-and-branch with immediate for unsigned comparisons with zero.
+(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
+(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes)
+
+// Constant compare-and-branch when operands match.
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)
+
+// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
+// to unsigned comparisons.
+// Helps simplify constant comparison detection.
+(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
+(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c])
+(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n])
+(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])
+
+// Absorb sign and zero extensions into 32-bit comparisons.
+(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])
+
+// Absorb flag constants into branches.
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)
+
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)
+
+// Absorb flag constants into LOCGR (conditional move) ops.
+(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x
+(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x
+(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x
+(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x
+
+(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x
+(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x
+(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x
+(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x
+
+// Remove redundant *const ops
+(ADDconst [0] x) => x
+(ADDWconst [c] x) && int32(c)==0 => x
+(SUBconst [0] x) => x
+(SUBWconst [c] x) && int32(c) == 0 => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ANDWconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORWconst [c] x) && int32(c)==0 => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORWconst [c] x) && int32(c)==0 => x
+
+// Shifts by zero (may be inserted during multiplication strength reduction).
+((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x
+
+// Convert constant subtracts to constant adds.
+(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
+(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
+(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
+(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
+(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
+(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
+(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
+(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
+(LoweredRound32F x:(FMOVSconst)) => x
+(LoweredRound64F x:(FMOVDconst)) => x
+
+// generic simplifications
+// TODO: more of this
+(ADD x (NEG y)) => (SUB x y)
+(ADDW x (NEGW y)) => (SUBW x y)
+(SUB x x) => (MOVDconst [0])
+(SUBW x x) => (MOVDconst [0])
+(AND x x) => x
+(ANDW x x) => x
+(OR x x) => x
+(ORW x x) => x
+(XOR x x) => (MOVDconst [0])
+(XORW x x) => (MOVDconst [0])
+(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
+(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+
+// carry flag generation
+// (only constant fold carry of zero)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ => (FlagEQ)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ => (FlagLT)
+
+// borrow flag generation
+// (only constant fold borrow of zero)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d == 0
+ => (FlagGT)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d != 0
+ => (FlagOV)
+
+// add with carry
+(ADDE x y (FlagEQ)) => (ADDC x y)
+(ADDE x y (FlagLT)) => (ADDC x y)
+(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
+(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])
+
+// subtract with borrow
+(SUBE x y (FlagGT)) => (SUBC x y)
+(SUBE x y (FlagOV)) => (SUBC x y)
+(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])
+
+// collapse carry chain
+(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ => (ADDE x y c)
+
+// collapse borrow chain
+(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ => (SUBE x y c)
+
+// branch on carry
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+
+// branch on borrow
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
+
+// fused multiply-add
+(Select0 (F(ADD|SUB) (FMUL y z) x)) => (FM(ADD|SUB) x y z)
+(Select0 (F(ADDS|SUBS) (FMULS y z) x)) => (FM(ADDS|SUBS) x y z)
+
+// Convert floating point comparisons against zero into 'load and test' instructions.
+(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
+(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))
+
+// FSUB, FSUBS, FADD, FADDS now produce a condition code representing the
+// comparison of the result with 0.0. If a compare-with-zero instruction
+// (e.g. LTDBR) follows one of those instructions, we can use the
+// generated flag and remove the comparison instruction.
+// Note: when inserting Select1 ops we need to ensure they are in the
+// same block as their argument. We could also use @x.Block for this
+// but moving the flag generating value to a different block seems to
+// increase the likelihood that the flags value will have to be regenerated
+// by flagalloc which is not what we want.
+(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x)
+(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)
+
+// Fold loads into arithmetic and logical operations.
+// Exclude global data (SB) because these instructions cannot handle relative addresses.
+// TODO(mundaym): indexed versions of these?
+((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
+
+// Combine constant stores into larger (unaligned) stores.
+// Avoid SB because constant stores to relative offsets are
+// emulated by the assembler and also can't handle unaligned offsets.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem)
+(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem)
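+
+// Illustrative aside (not part of the rule set): s390x is big-endian, so the merged
+// constant puts the lower-address byte in the more significant position. A minimal
+// Go check of the byte-store merge above:
+//
+//	package main
+//
+//	import (
+//		"encoding/binary"
+//		"fmt"
+//	)
+//
+//	func main() {
+//		a, c := uint8(0x12), uint8(0x34) // byte constants stored at offsets 0 and 1
+//		mem := []byte{a, c}
+//		merged := uint16(c)&0xff | uint16(a)<<8
+//		fmt.Println(binary.BigEndian.Uint16(mem) == merged) // true
+//	}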
+
+// Combine stores into larger (unaligned) stores.
+// It doesn't work on global data (based on SB) because stores with relative addressing
+// require that the memory operand be aligned.
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstore [i-4] {s} p w mem)
+(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstore [i-4] {s} p w0 mem)
+
+// Combine stores into larger (unaligned) stores with the bytes reversed (little endian).
+// Store-with-bytes-reversed instructions do not support relative memory addresses,
+// so these stores can't operate on global data (SB).
+(MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w0 mem)
+(MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w mem)
+(MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w0 mem)
+(MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w mem)
+(MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w0 mem)
+(MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDBRstore [i-4] {s} p w mem)
+(MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDBRstore [i-4] {s} p w0 mem)
+
+// Combining byte loads into larger (unaligned) loads.
+
+// Big-endian loads
+
+(ORW x1:(MOVBZload [i1] {s} p mem)
+ sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ && i1 == i0+1
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+
+(OR x1:(MOVBZload [i1] {s} p mem)
+ sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ && i1 == i0+1
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+
+(ORW x1:(MOVHZload [i1] {s} p mem)
+ sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ && i1 == i0+2
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+
+(OR x1:(MOVHZload [i1] {s} p mem)
+ sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ && i1 == i0+2
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+
+(OR x1:(MOVWZload [i1] {s} p mem)
+ sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
+ && i1 == i0+4
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
+
+(ORW
+ s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ or:(ORW
+ s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+
+(OR
+ s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ or:(OR
+ s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+
+(OR
+ s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))
+ or:(OR
+ s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))
+ y))
+ && i1 == i0+2
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)
+
+// Little-endian loads
+
+(ORW x0:(MOVBZload [i0] {s} p mem)
+ sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+
+(OR x0:(MOVBZload [i0] {s} p mem)
+ sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+
+(ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
+ sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)
+
+(OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
+ sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))
+
+(OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))
+ sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
+
+(ORW
+ s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ or:(ORW
+ s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ y))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+
+(OR
+ s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ or:(OR
+ s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ y))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+
+(OR
+ s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))
+ or:(OR
+ s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))
+ y))
+ && i1 == i0+2
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)
+
+// Combine stores into store multiples.
+// 32-bit
+(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-4)
+ && clobber(x)
+ => (STM2 [i-4] {s} p w0 w1 mem)
+(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STM3 [i-8] {s} p w0 w1 w2 mem)
+(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-12)
+ && clobber(x)
+ => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+// 64-bit
+(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STMG2 [i-8] {s} p w0 w1 mem)
+(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && clobber(x)
+ => (STMG3 [i-16] {s} p w0 w1 w2 mem)
+(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-24)
+ && clobber(x)
+ => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && clobber(x)
+ => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
+
+// Convert 32-bit store multiples into 64-bit stores.
+(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
new file mode 100644
index 0000000..b24fd61
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -0,0 +1,816 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - The SB 'register' is implemented using instruction-relative addressing. This
+// places some limitations on when and how memory operands that are addressed
+// relative to SB can be used:
+//
+// 1. Pseudo-instructions do not always map to a single machine instruction when
+// using the SB 'register' to address data. This is because many machine
+// instructions do not have relative long (RL suffix) equivalents. For example,
+// ADDload, which is assembled as AG.
+//
+// 2. Loads and stores using relative addressing require the data be aligned
+// according to its size (8-bytes for double words, 4-bytes for words
+// and so on).
+//
+// We can always work around these by inserting LARL instructions (load address
+// relative long) in the assembler, but typically this results in worse code
+// generation because the address can't be re-used. Inserting instructions in the
+// assembler also means clobbering the temp register and it is a long-term goal
+// to prevent the compiler doing this so that it can be allocated as a normal
+// register.
+//
+// For more information about the z/Architecture, the instruction set and the
+// addressing modes it supports, take a look at the z/Architecture Principles of
+// Operation: http://publibfp.boulder.ibm.com/epubs/pdf/dz9zr010.pdf
+//
+// Suffixes encode the bit width of pseudo-instructions.
+// D (double word) = 64 bit (frequently omitted)
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// B (byte) = 8 bit
+// S (single prec.) = 32 bit (double precision is omitted)
+
+// copied from ../../s390x/reg.go
+var regNamesS390X = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "g", // R13
+ "R14",
+ "SP", // R15
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ //pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesS390X) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesS390X {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ r0 = buildReg("R0")
+ tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+
+ // R10 is reserved by the assembler.
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | sp
+
+ // R0 is considered to contain the value 0 in address calculations.
+ ptr = gp &^ r0
+ ptrsp = ptr | sp
+ ptrspsb = ptrsp | sb
+
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: []regMask{}, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21tmp = regInfo{inputs: []regMask{gp &^ tmp, gp &^ tmp}, outputs: []regMask{gp &^ tmp}, clobbers: tmp}
+
+ // R0 evaluates to 0 when used as the number of bits to shift
+ // so we need to exclude it from that operand.
+ sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly}
+
+ addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly}
+ addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+
+ gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly}
+ gpopload = regInfo{inputs: []regMask{gp, ptrsp, 0}, outputs: gponly}
+ gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}}
+ gpstorebr = regInfo{inputs: []regMask{ptrsp, gpsp, 0}}
+ gpstorelaa = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}, outputs: gponly}
+ gpstorelab = regInfo{inputs: []regMask{r1, gpsp, 0}, clobbers: r1}
+
+ gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}}
+
+ fp01 = regInfo{inputs: []regMask{}, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fp11clobber = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}}
+
+ sync = regInfo{inputs: []regMask{0}}
+
+ // LoweredAtomicCas may overwrite arg1, so force it to R0 for now.
+ cas = regInfo{inputs: []regMask{ptrsp, r0, gpsp, 0}, outputs: []regMask{gp, 0}, clobbers: r0}
+
+ // LoweredAtomicExchange overwrites the output before executing
+ // CS{,G}, so the output register must not be the same as the
+ // input register. For now we just force the output register to
+ // R0.
+ exchange = regInfo{inputs: []regMask{ptrsp, gpsp &^ r0, 0}, outputs: []regMask{r0, 0}}
+ )
+
+ var S390Xops = []opData{
+ // fp ops
+ {name: "FADDS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FADDS", commutative: true, resultInArg0: true}, // fp32 arg0 + arg1
+ {name: "FADD", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FADD", commutative: true, resultInArg0: true}, // fp64 arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FSUBS", resultInArg0: true}, // fp32 arg0 - arg1
+ {name: "FSUB", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FSUB", resultInArg0: true}, // fp64 arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 arg0 * arg1
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 arg0 / arg1
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 arg0 / arg1
+ {name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 -arg0
+ {name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 -arg0
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", resultInArg0: true}, // fp32 arg1 * arg2 + arg0
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD", resultInArg0: true}, // fp64 arg1 * arg2 + arg0
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", resultInArg0: true}, // fp32 arg1 * arg2 - arg0
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB", resultInArg0: true}, // fp64 arg1 * arg2 - arg0
+ {name: "LPDFR", argLength: 1, reg: fp11, asm: "LPDFR"}, // fp64/fp32 set sign bit
+ {name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit
+ {name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0
+
+ // Round to integer, float64 only.
+ //
+ // aux | rounding mode
+ // ----+-----------------------------------
+ // 1 | round to nearest, ties away from 0
+ // 4 | round to nearest, ties to even
+ // 5 | round toward 0
+ // 6 | round toward +∞
+ // 7 | round toward -∞
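+ //
+ // For example, with aux=4, 2.5 rounds to 2 and 3.5 rounds to 4 (ties to even),
+ // while with aux=1 they round to 3 and 4 respectively (ties away from zero).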
+ {name: "FIDBR", argLength: 1, reg: fp11, asm: "FIDBR", aux: "Int8"},
+
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDload", argLength: 3, reg: gpopload, asm: "ADD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+ {name: "ADDWload", argLength: 3, reg: gpopload, asm: "ADDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBload", argLength: 3, reg: gpopload, asm: "SUB", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+ {name: "SUBWload", argLength: 3, reg: gpopload, asm: "SUBW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+ {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+
+ {name: "MULHD", argLength: 2, reg: gp21tmp, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "MULHDU", argLength: 2, reg: gp21tmp, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "DIVD", argLength: 2, reg: gp21tmp, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp21tmp, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVDU", argLength: 2, reg: gp21tmp, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp21tmp, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODD", argLength: 2, reg: gp21tmp, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp21tmp, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "MODDU", argLength: 2, reg: gp21tmp, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp21tmp, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDW", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDload", argLength: 3, reg: gpopload, asm: "AND", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+ {name: "ANDWload", argLength: 3, reg: gpopload, asm: "ANDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORW", argLength: 2, reg: gp21, asm: "ORW", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORWconst", argLength: 1, reg: gp11, asm: "ORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORload", argLength: 3, reg: gpopload, asm: "OR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+ {name: "ORWload", argLength: 3, reg: gpopload, asm: "ORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORW", argLength: 2, reg: gp21, asm: "XORW", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORload", argLength: 3, reg: gpopload, asm: "XOR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+ {name: "XORWload", argLength: 3, reg: gpopload, asm: "XORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+
+ // Arithmetic ops with carry/borrow chain.
+ //
+ // A carry is represented by a condition code of 2 or 3 (GT or OV).
+ // A borrow is represented by a condition code of 0 or 1 (EQ or LT).
+ {name: "ADDC", argLength: 2, reg: gp21flags, asm: "ADDC", typ: "(UInt64,Flags)", commutative: true}, // (arg0 + arg1, carry out)
+ {name: "ADDCconst", argLength: 1, reg: gp11flags, asm: "ADDC", typ: "(UInt64,Flags)", aux: "Int16"}, // (arg0 + auxint, carry out)
+ {name: "ADDE", argLength: 3, reg: gp2flags1flags, asm: "ADDE", typ: "(UInt64,Flags)", commutative: true, resultInArg0: true}, // (arg0 + arg1 + arg2 (carry in), carry out)
+ {name: "SUBC", argLength: 2, reg: gp21flags, asm: "SUBC", typ: "(UInt64,Flags)"}, // (arg0 - arg1, borrow out)
+ {name: "SUBE", argLength: 3, reg: gp2flags1flags, asm: "SUBE", typ: "(UInt64,Flags)", resultInArg0: true}, // (arg0 - arg1 - arg2 (borrow in), borrow out)
+
+ // Comparisons.
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64
+ {name: "LTDBR", argLength: 1, reg: fp1flags, asm: "LTDBR", typ: "Flags"}, // arg0 compare to 0, f64
+ {name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32
+
+ {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "UInt8"}, // arg0 << auxint, shift amount 0-63
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "UInt8"}, // arg0 << auxint, shift amount 0-31
+
+ {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "UInt8"}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "UInt8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
+
+ // Arithmetic shifts clobber flags.
+ {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "UInt8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "UInt8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
+
+ // Rotate instructions.
+ // Note: no RLLGconst - use RISBGZ instead.
+ {name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
+ {name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
+ {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "UInt8"}, // arg0 rotate left auxint, rotate amount 0-31
+
+ // Rotate then (and|or|xor|insert) selected bits instructions.
+ //
+ // Aux is an s390x.RotateParams struct containing Start, End and rotation
+ // Amount fields.
+ //
+ // arg1 is rotated left by the rotation amount then the bits from the start
+ // bit to the end bit (inclusive) are combined with arg0 using the logical
+ // operation specified. Bit indices are specified from left to right - the
+ // MSB is 0 and the LSB is 63.
+ //
+ // Examples:
+ // | aux |
+ // | instruction | start | end | amount | arg0 | arg1 | result |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ // | RXSBG (XOR) | 0 | 1 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0x3fff_ffff_ffff_ffff |
+ // | RXSBG (XOR) | 62 | 63 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_fffc |
+ // | RXSBG (XOR) | 0 | 47 | 16 | 0xffff_ffff_ffff_ffff | 0x0000_0000_0000_ffff | 0xffff_ffff_0000_ffff |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ //
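+ // RISBGZ (below) is similar but inserts the selected bits into a zeroed value
+ // rather than combining them with arg0: for example, start=48, end=63, amount=0
+ // is equivalent to ANDing with 0xffff.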
+ {name: "RXSBG", argLength: 2, reg: gp21, asm: "RXSBG", resultInArg0: true, aux: "S390XRotateParams", clobberFlags: true}, // rotate then xor selected bits
+ {name: "RISBGZ", argLength: 1, reg: gp11, asm: "RISBGZ", aux: "S390XRotateParams", clobberFlags: true}, // rotate then insert selected bits [into zero]
+
+ // unary ops
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0
+
+ {name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+ {name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0)
+
+ // Conditional register-register moves.
+ // The aux for these values is an s390x.CCMask value representing the condition code mask.
+ {name: "LOCGR", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "LOCGR", aux: "S390XCCMask"}, // load arg1 into arg0 if the condition code in arg2 matches a masked bit in aux.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64
+ {name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64
+ {name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "LDGR", argLength: 1, reg: gpfp, asm: "LDGR"}, // move int64 to float64 (no conversion)
+ {name: "LGDR", argLength: 1, reg: fpgp, asm: "LGDR"}, // move float64 to int64 (no conversion)
+
+ {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA", clobberFlags: true}, // convert float64 to int32
+ {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA", clobberFlags: true}, // convert float64 to int64
+ {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA", clobberFlags: true}, // convert float32 to int32
+ {name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA", clobberFlags: true}, // convert float32 to int64
+ {name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA", clobberFlags: true}, // convert int32 to float32
+ {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA", clobberFlags: true}, // convert int32 to float64
+ {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA", clobberFlags: true}, // convert int64 to float32
+ {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA", clobberFlags: true}, // convert int64 to float64
+ {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR", clobberFlags: true}, // convert float32 to uint32
+ {name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR", clobberFlags: true}, // convert float64 to uint32
+ {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR", clobberFlags: true}, // convert float32 to uint64
+ {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR", clobberFlags: true}, // convert float64 to uint64
+ {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR", clobberFlags: true}, // convert uint32 to float32
+ {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR", clobberFlags: true}, // convert uint32 to float64
+ {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR", clobberFlags: true}, // convert uint64 to float32
+ {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR", clobberFlags: true}, // convert uint64 to float64
+
+ {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
+ {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
+
+ {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux
+ {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
+
+ {name: "MOVWBR", argLength: 1, reg: gp11, asm: "MOVWBR"}, // arg0 swap bytes
+ {name: "MOVDBR", argLength: 1, reg: gp11, asm: "MOVDBR"}, // arg0 swap bytes
+
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHBRstore", argLength: 3, reg: gpstorebr, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRstore", argLength: 3, reg: gpstorebr, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstorebr, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+
+ {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, faultOnNilArg1: true, symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off
+
+ // indexed loads/stores
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVB", aux: "SymOff", typ: "Int8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVH", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVW", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+ {name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ...
+
+ {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"},
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R12 (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}, zeroWidth: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant condition code values. The condition code can be 0, 1, 2 or 3.
+ {name: "FlagEQ"}, // CC=0 (equal)
+ {name: "FlagLT"}, // CC=1 (less than)
+ {name: "FlagGT"}, // CC=2 (greater than)
+ {name: "FlagOV"}, // CC=3 (overflow)
+
+ // Fast-BCR-serialization to ensure store-load ordering.
+ {name: "SYNC", argLength: 1, reg: sync, asm: "SYNC", typ: "Mem"},
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVWZatomicload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVDatomicload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores. These are just normal stores.
+ // store arg1 to arg0+auxint+aux. arg2=mem.
+ {name: "MOVBatomicstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+
+ // Atomic adds.
+ // *(arg0+auxint+aux) += arg1. arg2=mem.
+ // Returns a tuple of <old contents of *(arg0+auxint+aux), memory>.
+ {name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Atomic bitwise operations.
+ // Note: 'floor' operations round the pointer down to the nearest word boundary
+ // which reflects how they are used in the runtime.
+ {name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
+ {name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
+ {name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
+ {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in arg1, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CS ...
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // CMPW ret, $0
+ // BNE ...
+ // instead of just
+ // CS ...
+ // BEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Lowered atomic swaps, emulated using compare-and-swap.
+ // store arg1 to arg0+auxint+aux, arg2=mem.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // find leftmost one
+ {
+ name: "FLOGR",
+ argLength: 1,
+ reg: regInfo{inputs: gponly, outputs: []regMask{buildReg("R0")}, clobbers: buildReg("R1")},
+ asm: "FLOGR",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // population count
+ //
+ // Counts the number of ones in each byte of arg0
+ // and places the result into the corresponding byte
+ // of the result.
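+ //
+ // For example, an input of 0x0300_0000_0000_00ff produces 0x0200_0000_0000_0008.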
+ {
+ name: "POPCNT",
+ argLength: 1,
+ reg: gp11,
+ asm: "POPCNT",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // unsigned multiplication (64x64 → 128)
+ //
+ // Multiply the two 64-bit input operands together and place the 128-bit result into
+ // an even-odd register pair. The second register in the target pair also contains
+ // one of the input operands. Since we don't currently have a way to specify an
+ // even-odd register pair we hardcode this register pair as R2:R3.
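+ // The even register (R2) receives the high 64 bits of the product and the odd
+ // register (R3) receives the low 64 bits.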
+ {
+ name: "MLGR",
+ argLength: 2,
+ reg: regInfo{inputs: []regMask{gp, r3}, outputs: []regMask{r2, r3}},
+ asm: "MLGR",
+ },
+
+ // pseudo operations to sum the output of the POPCNT instruction
+ {name: "SumBytes2", argLength: 1, typ: "UInt8"}, // sum the rightmost 2 bytes in arg0 ignoring overflow
+ {name: "SumBytes4", argLength: 1, typ: "UInt8"}, // sum the rightmost 4 bytes in arg0 ignoring overflow
+ {name: "SumBytes8", argLength: 1, typ: "UInt8"}, // sum all the bytes in arg0 ignoring overflow
+
+ // store multiple
+ {
+ name: "STMG2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+
+ // large move
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = address of src memory (in R2, changed as a side effect)
+ // arg2 = pointer to last address to move in loop + 256
+ // arg3 = mem
+ // returns mem
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+ // CMP R2, Rarg2
+ // BNE mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large clear
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = pointer to last address to zero in loop + 256
+ // arg2 = mem
+ // returns mem
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+ // CMP R1, Rarg1
+ // BNE clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gpsp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+ }
+
+ // All blocks on s390x have their condition code mask (s390x.CCMask) as the Aux value.
+ // The condition code mask is a 4-bit mask where each bit corresponds to a condition
+ // code value. If the value of the condition code matches a bit set in the condition
+ // code mask then the first successor is executed. Otherwise the second successor is
+ // executed.
+ //
+ // | condition code value | mask bit |
+ // +----------------------+------------+
+ // | 0 (equal) | 0b1000 (8) |
+ // | 1 (less than) | 0b0100 (4) |
+ // | 2 (greater than) | 0b0010 (2) |
+ // | 3 (unordered) | 0b0001 (1) |
+ //
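+ // For example, a mask of 0b1010 (10) selects the first successor when the
+ // condition code is 0 (equal) or 2 (greater than), i.e. on a signed
+ // 'greater than or equal' comparison result.
+ //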
+ // Note that compare-and-branch instructions must not have bit 3 (0b0001) set.
+ var S390Xblocks = []blockData{
+ // branch on condition
+ {name: "BRC", controls: 1, aux: "S390XCCMask"}, // condition code value (flags) is Controls[0]
+
+ // compare-and-branch (register-register)
+ // - integrates comparison of Controls[0] with Controls[1]
+ // - both control values must be in general purpose registers
+ {name: "CRJ", controls: 2, aux: "S390XCCMask"}, // signed 32-bit integer comparison
+ {name: "CGRJ", controls: 2, aux: "S390XCCMask"}, // signed 64-bit integer comparison
+ {name: "CLRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 32-bit integer comparison
+ {name: "CLGRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 64-bit integer comparison
+
+ // compare-and-branch (register-immediate)
+ // - integrates comparison of Controls[0] with AuxInt
+ // - control value must be in a general purpose register
+ // - the AuxInt value is sign-extended for signed comparisons
+ // and zero-extended for unsigned comparisons
+ {name: "CIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 32-bit integer comparison
+ {name: "CGIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 64-bit integer comparison
+ {name: "CLIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 32-bit integer comparison
+ {name: "CLGIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 64-bit integer comparison
+ }
+
+ archs = append(archs, arch{
+ name: "S390X",
+ pkg: "cmd/internal/obj/s390x",
+ genfile: "../../s390x/ssa.go",
+ ops: S390Xops,
+ blocks: S390Xblocks,
+ regnames: regNamesS390X,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ imports: []string{
+ "cmd/internal/obj/s390x",
+ },
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
new file mode 100644
index 0000000..fc45cd3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -0,0 +1,408 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8|Ptr) ...) => (I64Add ...)
+(Add(64|32)F ...) => (F(64|32)Add ...)
+
+(Sub(64|32|16|8|Ptr) ...) => (I64Sub ...)
+(Sub(64|32)F ...) => (F(64|32)Sub ...)
+
+(Mul(64|32|16|8) ...) => (I64Mul ...)
+(Mul(64|32)F ...) => (F(64|32)Mul ...)
+
+(Div64 [false] x y) => (I64DivS x y)
+(Div32 [false] x y) => (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+(Div16 [false] x y) => (I64DivS (SignExt16to64 x) (SignExt16to64 y))
+(Div8 x y) => (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+(Div64u ...) => (I64DivU ...)
+(Div32u x y) => (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Div16u x y) => (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Div8u x y) => (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Div(64|32)F ...) => (F(64|32)Div ...)
+
+(Mod64 [false] x y) => (I64RemS x y)
+(Mod32 [false] x y) => (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+(Mod16 [false] x y) => (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+(Mod8 x y) => (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+(Mod64u ...) => (I64RemU ...)
+(Mod32u x y) => (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Mod16u x y) => (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Mod8u x y) => (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+
+(And(64|32|16|8|B) ...) => (I64And ...)
+
+(Or(64|32|16|8|B) ...) => (I64Or ...)
+
+(Xor(64|32|16|8) ...) => (I64Xor ...)
+
+(Neg(64|32|16|8) x) => (I64Sub (I64Const [0]) x)
+(Neg(64|32)F ...) => (F(64|32)Neg ...)
+
+(Com(64|32|16|8) x) => (I64Xor x (I64Const [-1]))
+
+(Not ...) => (I64Eqz ...)
+
+// Lowering pointer arithmetic
+(OffPtr ...) => (I64AddConst ...)
+
+// Lowering extension
+// It is unnecessary to extend loads
+(SignExt32to64 x:(I64Load32S _ _)) => x
+(SignExt16to(64|32) x:(I64Load16S _ _)) => x
+(SignExt8to(64|32|16) x:(I64Load8S _ _)) => x
+(ZeroExt32to64 x:(I64Load32U _ _)) => x
+(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x
+(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x
+(SignExt32to64 x) && objabi.GOWASM.SignExt => (I64Extend32S x)
+(SignExt8to(64|32|16) x) && objabi.GOWASM.SignExt => (I64Extend8S x)
+(SignExt16to(64|32) x) && objabi.GOWASM.SignExt => (I64Extend16S x)
+(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+(ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff]))
+(ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff]))
+(ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff]))
+
+(Slicemask x) => (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+
+// Lowering truncation
+// Because we ignore the high parts, truncates are just copies.
+(Trunc64to(32|16|8) ...) => (Copy ...)
+(Trunc32to(16|8) ...) => (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+
+// Lowering float <=> int
+(Cvt32to(64|32)F x) => (F(64|32)ConvertI64S (SignExt32to64 x))
+(Cvt64to(64|32)F ...) => (F(64|32)ConvertI64S ...)
+(Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x))
+(Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...)
+
+(Cvt32Fto32 ...) => (I64TruncSatF32S ...)
+(Cvt32Fto64 ...) => (I64TruncSatF32S ...)
+(Cvt64Fto32 ...) => (I64TruncSatF64S ...)
+(Cvt64Fto64 ...) => (I64TruncSatF64S ...)
+(Cvt32Fto32U ...) => (I64TruncSatF32U ...)
+(Cvt32Fto64U ...) => (I64TruncSatF32U ...)
+(Cvt64Fto32U ...) => (I64TruncSatF64U ...)
+(Cvt64Fto64U ...) => (I64TruncSatF64U ...)
+
+(Cvt32Fto64F ...) => (F64PromoteF32 ...)
+(Cvt64Fto32F ...) => (F32DemoteF64 ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
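+// For example, for a uint64 x Go requires x << 70 == 0, whereas wasm's i64.shl uses
+// the shift amount modulo 64, hence the explicit bounds checks below.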
+
+(Lsh64x64 x y) && shiftIsBounded(v) => (I64Shl x y)
+(Lsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64Shl x (I64Const [c]))
+(Lsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Lsh64x64 x y) => (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Lsh64x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh32x64 ...) => (Lsh64x64 ...)
+(Lsh32x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh16x64 ...) => (Lsh64x64 ...)
+(Lsh16x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh8x64 ...) => (Lsh64x64 ...)
+(Lsh8x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh64Ux64 x y) && shiftIsBounded(v) => (I64ShrU x y)
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrU x (I64Const [c]))
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Rsh64Ux64 x y) => (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Rsh64Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh32Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+(Rsh32Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+(Rsh16Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+(Rsh8Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to (width - 1) if the shift value is >= width.
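+// For example, int64(-8) >> 100 must evaluate to -1 in Go, so out-of-range shift
+// amounts are clamped to 63 before the I64ShrS.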
+
+(Rsh64x64 x y) && shiftIsBounded(v) => (I64ShrS x y)
+(Rsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrS x (I64Const [c]))
+(Rsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64ShrS x (I64Const [63]))
+(Rsh64x64 x y) => (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
+(Rsh64x(32|16|8) [c] x y) => (Rsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh32x64 [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) y)
+(Rsh32x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16x64 [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) y)
+(Rsh16x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8x64 [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) y)
+(Rsh8x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Lowering rotates
+(RotateLeft8 <t> x (I64Const [c])) => (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+(RotateLeft16 <t> x (I64Const [c])) => (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
+(RotateLeft32 ...) => (I32Rotl ...)
+(RotateLeft64 ...) => (I64Rotl ...)
+
+// Lowering comparisons
+(Less64 ...) => (I64LtS ...)
+(Less32 x y) => (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (I64LtU ...)
+(Less32U x y) => (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less(64|32)F ...) => (F(64|32)Lt ...)
+
+(Leq64 ...) => (I64LeS ...)
+(Leq32 x y) => (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+(Leq16 x y) => (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+(Leq8 x y) => (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+(Leq64U ...) => (I64LeU ...)
+(Leq32U x y) => (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Leq16U x y) => (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Leq8U x y) => (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Leq(64|32)F ...) => (F(64|32)Le ...)
+
+(Eq64 ...) => (I64Eq ...)
+(Eq32 x y) => (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Eq16 x y) => (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Eq8 x y) => (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+(EqB ...) => (I64Eq ...)
+(EqPtr ...) => (I64Eq ...)
+(Eq(64|32)F ...) => (F(64|32)Eq ...)
+
+(Neq64 ...) => (I64Ne ...)
+(Neq32 x y) => (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Neq16 x y) => (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Neq8 x y) => (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+(NeqB ...) => (I64Ne ...)
+(NeqPtr ...) => (I64Ne ...)
+(Neq(64|32)F ...) => (F(64|32)Ne ...)
+
+// Lowering loads
+(Load <t> ptr mem) && is32BitFloat(t) => (F32Load ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (F64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 8 => (I64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && !t.IsSigned() => (I64Load32U ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && t.IsSigned() => (I64Load32S ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && !t.IsSigned() => (I64Load16U ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && t.IsSigned() => (I64Load16S ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && !t.IsSigned() => (I64Load8U ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && t.IsSigned() => (I64Load8S ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && is64BitFloat(t) => (F64Store ptr val mem)
+(Store {t} ptr val mem) && is32BitFloat(t) => (F32Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 => (I64Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (I64Store32 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (I64Store16 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (I64Store8 ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (I64Store8 dst (I64Load8U src mem) mem)
+(Move [2] dst src mem) => (I64Store16 dst (I64Load16U src mem) mem)
+(Move [4] dst src mem) => (I64Store32 dst (I64Load32U src mem) mem)
+(Move [8] dst src mem) => (I64Store dst (I64Load src mem) mem)
+(Move [16] dst src mem) =>
+ (I64Store [8] dst (I64Load [8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+(Move [3] dst src mem) =>
+ (I64Store8 [2] dst (I64Load8U [2] src mem)
+ (I64Store16 dst (I64Load16U src mem) mem))
+(Move [5] dst src mem) =>
+ (I64Store8 [4] dst (I64Load8U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [6] dst src mem) =>
+ (I64Store16 [4] dst (I64Load16U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [7] dst src mem) =>
+ (I64Store32 [3] dst (I64Load32U [3] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [s] dst src mem) && s > 8 && s < 16 =>
+ (I64Store [s-8] dst (I64Load [s-8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+
+// Adjust moves to be a multiple of 16 bytes.
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (I64Store dst (I64Load src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (I64Store [8] dst (I64Load [8] src mem)
+ (I64Store dst (I64Load src mem) mem)))
+
+// Large copying uses helper.
+(Move [s] dst src mem) && s%8 == 0 && logLargeCopy(v, s) =>
+ (LoweredMove [s/8] dst src mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (I64Store8 destptr (I64Const [0]) mem)
+(Zero [2] destptr mem) => (I64Store16 destptr (I64Const [0]) mem)
+(Zero [4] destptr mem) => (I64Store32 destptr (I64Const [0]) mem)
+(Zero [8] destptr mem) => (I64Store destptr (I64Const [0]) mem)
+
+(Zero [3] destptr mem) =>
+ (I64Store8 [2] destptr (I64Const [0])
+ (I64Store16 destptr (I64Const [0]) mem))
+(Zero [5] destptr mem) =>
+ (I64Store8 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [6] destptr mem) =>
+ (I64Store16 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [7] destptr mem) =>
+ (I64Store32 [3] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (I64Store destptr (I64Const [0]) mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) =>
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))
+(Zero [24] destptr mem) =>
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem)))
+(Zero [32] destptr mem) =>
+ (I64Store [24] destptr (I64Const [0])
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))))
+
+// Large zeroing uses helper.
+(Zero [s] destptr mem) && s%8 == 0 && s > 32 =>
+ (LoweredZero [s/8] destptr mem)
+
+// Lowering constants
+(Const64 ...) => (I64Const ...)
+(Const(32|16|8) [c]) => (I64Const [int64(c)])
+(Const(64|32)F ...) => (F(64|32)Const ...)
+(ConstNil) => (I64Const [0])
+(ConstBool [c]) => (I64Const [b2i(c)])
+
+// Lowering calls
+(StaticCall ...) => (LoweredStaticCall ...)
+(ClosureCall ...) => (LoweredClosureCall ...)
+(InterCall ...) => (LoweredInterCall ...)
+
+// Miscellaneous
+(Convert ...) => (LoweredConvert ...)
+(IsNonNil p) => (I64Eqz (I64Eqz p))
+(IsInBounds ...) => (I64LtU ...)
+(IsSliceInBounds ...) => (I64LeU ...)
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LoweredAddr {sym} [0] base)
+(LocalAddr {sym} base _) => (LoweredAddr {sym} base)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// --- Intrinsics ---
+(Sqrt ...) => (F64Sqrt ...)
+(Trunc ...) => (F64Trunc ...)
+(Ceil ...) => (F64Ceil ...)
+(Floor ...) => (F64Floor ...)
+(RoundToEven ...) => (F64Nearest ...)
+(Abs ...) => (F64Abs ...)
+(Copysign ...) => (F64Copysign ...)
+
+(Ctz64 ...) => (I64Ctz ...)
+(Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000])))
+(Ctz16 x) => (I64Ctz (I64Or x (I64Const [0x10000])))
+(Ctz8 x) => (I64Ctz (I64Or x (I64Const [0x100])))
+
+(Ctz(64|32|16|8)NonZero ...) => (I64Ctz ...)
+
+(BitLen64 x) => (I64Sub (I64Const [64]) (I64Clz x))
+
+(PopCount64 ...) => (I64Popcnt ...)
+(PopCount32 x) => (I64Popcnt (ZeroExt32to64 x))
+(PopCount16 x) => (I64Popcnt (ZeroExt16to64 x))
+(PopCount8 x) => (I64Popcnt (ZeroExt8to64 x))
+
+(CondSelect ...) => (Select ...)
+
+// --- Optimizations ---
+(I64Add (I64Const [x]) (I64Const [y])) => (I64Const [x + y])
+(I64Mul (I64Const [x]) (I64Const [y])) => (I64Const [x * y])
+(I64And (I64Const [x]) (I64Const [y])) => (I64Const [x & y])
+(I64Or (I64Const [x]) (I64Const [y])) => (I64Const [x | y])
+(I64Xor (I64Const [x]) (I64Const [y])) => (I64Const [x ^ y])
+(F64Add (F64Const [x]) (F64Const [y])) => (F64Const [x + y])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(x * y) => (F64Const [x * y])
+(I64Eq (I64Const [x]) (I64Const [y])) && x == y => (I64Const [1])
+(I64Eq (I64Const [x]) (I64Const [y])) && x != y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x == y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x != y => (I64Const [1])
+
+(I64Shl (I64Const [x]) (I64Const [y])) => (I64Const [x << uint64(y)])
+(I64ShrU (I64Const [x]) (I64Const [y])) => (I64Const [int64(uint64(x) >> uint64(y))])
+(I64ShrS (I64Const [x]) (I64Const [y])) => (I64Const [x >> uint64(y)])
+
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Ne y (I64Const [x]))
+
+(I64Eq x (I64Const [0])) => (I64Eqz x)
+(I64LtU (I64Const [0]) x) => (I64Eqz (I64Eqz x))
+(I64LeU x (I64Const [0])) => (I64Eqz x)
+(I64LtU x (I64Const [1])) => (I64Eqz x)
+(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
+(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
+
+(I64Add x (I64Const [y])) => (I64AddConst [y] x)
+(I64AddConst [0] x) => x
+(I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x)
+
+// folding offset into load/store
+((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off] (I64AddConst [off2] ptr) mem)
+ && isU32Bit(off+off2) =>
+ ((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off+off2] ptr mem)
+
+((I64Store|I64Store32|I64Store16|I64Store8) [off] (I64AddConst [off2] ptr) val mem)
+ && isU32Bit(off+off2) =>
+ ((I64Store|I64Store32|I64Store16|I64Store8) [off+off2] ptr val mem)
+
+// folding offset into address
+(I64AddConst [off] (LoweredAddr {sym} [off2] base)) && isU32Bit(off+int64(off2)) =>
+ (LoweredAddr {sym} [int32(off)+off2] base)
+(I64AddConst [off] x:(SP)) && isU32Bit(off) => (LoweredAddr [int32(off)] x) // so it is rematerializeable
+
+// transforming readonly globals into constants
+(I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read8(sym, off+int64(off2)))])
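
The Ctz32/Ctz16/Ctz8 lowerings above rest on a small trick: OR-ing in a constant whose only set bit sits just above the operand width bounds the 64-bit trailing-zero count, so a zero input yields the operand width instead of 64. A minimal standalone Go sketch of the same idea (math/bits stands in for the wasm I64Ctz instruction here; this is illustrative, not compiler code):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// ctz32Via64 mirrors (Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000]))).
	// Bit 32 is always set, so the 64-bit count never exceeds 32 and a zero
	// 32-bit input correctly reports 32 trailing zeros.
	func ctz32Via64(x uint32) int {
		return bits.TrailingZeros64(uint64(x) | 1<<32)
	}

	func main() {
		for _, x := range []uint32{0, 1, 6, 0x80000000} {
			fmt.Println(x, ctz32Via64(x), bits.TrailingZeros32(x)) // the two counts agree
		}
	}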
diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go
new file mode 100644
index 0000000..36c53bc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import "strings"
+
+var regNamesWasm = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "SP",
+ "g",
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesWasm) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesWasm {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
+ fp32 = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ fp64 = buildReg("F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ // The "registers", which are actually local variables, can get clobbered
+ // if we're switching goroutines, because it unwinds the WebAssembly stack.
+ callerSave = gp | fp32 | fp64 | buildReg("g")
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpsp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gpsp, gpsp, gpsp}, outputs: []regMask{gp}}
+ fp32_01 = regInfo{inputs: nil, outputs: []regMask{fp32}}
+ fp32_11 = regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp32}}
+ fp32_21 = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{fp32}}
+ fp32_21gp = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{gp}}
+ fp64_01 = regInfo{inputs: nil, outputs: []regMask{fp64}}
+ fp64_11 = regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp64}}
+ fp64_21 = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{fp64}}
+ fp64_21gp = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ fp32load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp32}}
+ fp32store = regInfo{inputs: []regMask{gpspsb, fp32, 0}}
+ fp64load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp64}}
+ fp64store = regInfo{inputs: []regMask{gpspsb, fp64, 0}}
+ )
+
+ var WasmOps = []opData{
+ {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ {name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux+auxint, arg0=base
+ {name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len/8, returns mem
+ {name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len/8, returns mem
+
+ {name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, // returns the PC of the caller of the current function
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, // returns the SP of the caller of the current function
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Sym", symEffect: "None"}, // invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+
+ // LoweredConvert converts between pointers and integers.
+	// We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ //
+ // TODO(neelance): LoweredConvert should not be necessary any more, since OpConvert does not need to be lowered any more (CL 108496).
+ {name: "LoweredConvert", argLength: 2, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}},
+
+ // The following are native WebAssembly instructions, see https://webassembly.github.io/spec/core/syntax/instructions.html
+
+ {name: "Select", asm: "Select", argLength: 3, reg: gp31}, // returns arg0 if arg2 != 0, otherwise returns arg1
+
+ {name: "I64Load8U", asm: "I64Load8U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt8"}, // read unsigned 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load8S", asm: "I64Load8S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int8"}, // read signed 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16U", asm: "I64Load16U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt16"}, // read unsigned 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16S", asm: "I64Load16S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int16"}, // read signed 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32U", asm: "I64Load32U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt32"}, // read unsigned 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32S", asm: "I64Load32S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int32"}, // read signed 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load", asm: "I64Load", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt64"}, // read 64-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Store8", asm: "I64Store8", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 8-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store16", asm: "I64Store16", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 16-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store32", asm: "I64Store32", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 32-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store", asm: "I64Store", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 64-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "F32Load", asm: "F32Load", argLength: 2, reg: fp32load, aux: "Int64", typ: "Float32"}, // read 32-bit float from address arg0+aux, arg1=mem
+ {name: "F64Load", asm: "F64Load", argLength: 2, reg: fp64load, aux: "Int64", typ: "Float64"}, // read 64-bit float from address arg0+aux, arg1=mem
+ {name: "F32Store", asm: "F32Store", argLength: 3, reg: fp32store, aux: "Int64", typ: "Mem"}, // store 32-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "F64Store", asm: "F64Store", argLength: 3, reg: fp64store, aux: "Int64", typ: "Mem"}, // store 64-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "I64Const", reg: gp01, aux: "Int64", rematerializeable: true, typ: "Int64"}, // returns the constant integer aux
+ {name: "F32Const", reg: fp32_01, aux: "Float32", rematerializeable: true, typ: "Float32"}, // returns the constant float aux
+ {name: "F64Const", reg: fp64_01, aux: "Float64", rematerializeable: true, typ: "Float64"}, // returns the constant float aux
+
+ {name: "I64Eqz", asm: "I64Eqz", argLength: 1, reg: gp11, typ: "Bool"}, // arg0 == 0
+ {name: "I64Eq", asm: "I64Eq", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 == arg1
+ {name: "I64Ne", asm: "I64Ne", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 != arg1
+ {name: "I64LtS", asm: "I64LtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (signed)
+ {name: "I64LtU", asm: "I64LtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (unsigned)
+ {name: "I64GtS", asm: "I64GtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (signed)
+ {name: "I64GtU", asm: "I64GtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (unsigned)
+ {name: "I64LeS", asm: "I64LeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (signed)
+ {name: "I64LeU", asm: "I64LeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (unsigned)
+ {name: "I64GeS", asm: "I64GeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (signed)
+ {name: "I64GeU", asm: "I64GeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (unsigned)
+
+ {name: "F32Eq", asm: "F32Eq", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F32Ne", asm: "F32Ne", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F32Lt", asm: "F32Lt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F32Gt", asm: "F32Gt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F32Le", asm: "F32Le", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F32Ge", asm: "F32Ge", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "F64Eq", asm: "F64Eq", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F64Ne", asm: "F64Ne", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F64Lt", asm: "F64Lt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F64Gt", asm: "F64Gt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F64Le", asm: "F64Le", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F64Ge", asm: "F64Ge", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "I64Add", asm: "I64Add", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 + arg1
+ {name: "I64AddConst", asm: "I64Add", argLength: 1, reg: gp11, aux: "Int64", typ: "Int64"}, // arg0 + aux
+ {name: "I64Sub", asm: "I64Sub", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 - arg1
+ {name: "I64Mul", asm: "I64Mul", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 * arg1
+ {name: "I64DivS", asm: "I64DivS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (signed)
+ {name: "I64DivU", asm: "I64DivU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (unsigned)
+ {name: "I64RemS", asm: "I64RemS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (signed)
+ {name: "I64RemU", asm: "I64RemU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (unsigned)
+ {name: "I64And", asm: "I64And", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 & arg1
+ {name: "I64Or", asm: "I64Or", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 | arg1
+ {name: "I64Xor", asm: "I64Xor", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 ^ arg1
+ {name: "I64Shl", asm: "I64Shl", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 << (arg1 % 64)
+ {name: "I64ShrS", asm: "I64ShrS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (signed)
+ {name: "I64ShrU", asm: "I64ShrU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (unsigned)
+
+ {name: "F32Neg", asm: "F32Neg", argLength: 1, reg: fp32_11, typ: "Float32"}, // -arg0
+ {name: "F32Add", asm: "F32Add", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 + arg1
+ {name: "F32Sub", asm: "F32Sub", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 - arg1
+ {name: "F32Mul", asm: "F32Mul", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 * arg1
+ {name: "F32Div", asm: "F32Div", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 / arg1
+
+ {name: "F64Neg", asm: "F64Neg", argLength: 1, reg: fp64_11, typ: "Float64"}, // -arg0
+ {name: "F64Add", asm: "F64Add", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 + arg1
+ {name: "F64Sub", asm: "F64Sub", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 - arg1
+ {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1
+ {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1
+
+ {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float
+ {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to a float
+ {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float
+ {name: "F64ConvertI64U", asm: "F64ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the unsigned integer arg0 to a float
+ {name: "F32DemoteF64", asm: "F32DemoteF64", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp32}}, typ: "Float32"},
+ {name: "F64PromoteF32", asm: "F64PromoteF32", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp64}}, typ: "Float64"},
+
+ {name: "I64Extend8S", asm: "I64Extend8S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 8 to 64 bit
+ {name: "I64Extend16S", asm: "I64Extend16S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 16 to 64 bit
+ {name: "I64Extend32S", asm: "I64Extend32S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 32 to 64 bit
+
+ {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp64_11, typ: "Float32"}, // sqrt(arg0)
+ {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp64_11, typ: "Float32"}, // trunc(arg0)
+ {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp64_11, typ: "Float32"}, // ceil(arg0)
+ {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp64_11, typ: "Float32"}, // floor(arg0)
+ {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp64_11, typ: "Float32"}, // round(arg0)
+ {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp64_11, typ: "Float32"}, // abs(arg0)
+ {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp64_21, typ: "Float32"}, // copysign(arg0, arg1)
+
+ {name: "F64Sqrt", asm: "F64Sqrt", argLength: 1, reg: fp64_11, typ: "Float64"}, // sqrt(arg0)
+ {name: "F64Trunc", asm: "F64Trunc", argLength: 1, reg: fp64_11, typ: "Float64"}, // trunc(arg0)
+ {name: "F64Ceil", asm: "F64Ceil", argLength: 1, reg: fp64_11, typ: "Float64"}, // ceil(arg0)
+ {name: "F64Floor", asm: "F64Floor", argLength: 1, reg: fp64_11, typ: "Float64"}, // floor(arg0)
+ {name: "F64Nearest", asm: "F64Nearest", argLength: 1, reg: fp64_11, typ: "Float64"}, // round(arg0)
+ {name: "F64Abs", asm: "F64Abs", argLength: 1, reg: fp64_11, typ: "Float64"}, // abs(arg0)
+ {name: "F64Copysign", asm: "F64Copysign", argLength: 2, reg: fp64_21, typ: "Float64"}, // copysign(arg0, arg1)
+
+ {name: "I64Ctz", asm: "I64Ctz", argLength: 1, reg: gp11, typ: "Int64"}, // ctz(arg0)
+ {name: "I64Clz", asm: "I64Clz", argLength: 1, reg: gp11, typ: "Int64"}, // clz(arg0)
+ {name: "I32Rotl", asm: "I32Rotl", argLength: 2, reg: gp21, typ: "Int32"}, // rotl(arg0, arg1)
+ {name: "I64Rotl", asm: "I64Rotl", argLength: 2, reg: gp21, typ: "Int64"}, // rotl(arg0, arg1)
+ {name: "I64Popcnt", asm: "I64Popcnt", argLength: 1, reg: gp11, typ: "Int64"}, // popcnt(arg0)
+ }
+
+ archs = append(archs, arch{
+ name: "Wasm",
+ pkg: "cmd/internal/obj/wasm",
+ genfile: "../../wasm/ssa.go",
+ ops: WasmOps,
+ blocks: nil,
+ regnames: regNamesWasm,
+ gpregmask: gp,
+ fpregmask: fp32 | fp64,
+ fp32regmask: fp32,
+ fp64regmask: fp64,
+ framepointerreg: -1, // not used
+ linkreg: -1, // not used
+ })
+}
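
Each opData row above is consumed by the generator in this directory, which packs register sets into 64-bit regMask bitsets (hence the len(regNamesWasm) > 64 check): bit i set means regNamesWasm[i] is in the set. A small standalone sketch of decoding such a mask back into names (the helper and the truncated register list are illustrative, not part of the generator):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// maskNames expands a regMask-style bitset into register names,
	// mirroring the bit-per-register convention used by buildReg above.
	func maskNames(m uint64, names []string) []string {
		var out []string
		for m != 0 {
			i := bits.TrailingZeros64(m)
			if i < len(names) {
				out = append(out, names[i])
			}
			m &^= 1 << i
		}
		return out
	}

	func main() {
		names := []string{"R0", "R1", "R2", "R3"} // truncated register list for the sketch
		gp := uint64(1<<0 | 1<<1 | 1<<2 | 1<<3)
		fmt.Println(maskNames(gp, names)) // [R0 R1 R2 R3]
	}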
diff --git a/src/cmd/compile/internal/ssa/gen/cover.bash b/src/cmd/compile/internal/ssa/gen/cover.bash
new file mode 100755
index 0000000..6c860fc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/cover.bash
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# A quick and dirty way to obtain code coverage from rulegen's main func. For
+# example:
+#
+# ./cover.bash && go tool cover -html=cover.out
+#
+# This script is needed to set up a temporary test file, so that we don't break
+# regular 'go run *.go' usage to run the generator.
+
+cat >main_test.go <<-EOF
+ // +build ignore
+
+ package main
+
+ import "testing"
+
+ func TestCoverage(t *testing.T) { main() }
+EOF
+
+go test -run='^TestCoverage$' -coverprofile=cover.out "$@" *.go
+
+rm -f main_test.go
diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules
new file mode 100644
index 0000000..4c677f8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec.rules
@@ -0,0 +1,92 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose builtin compound types
+// (complex,string,slice,interface) into their constituent
+// types. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+// complex ops
+(ComplexReal (ComplexMake real _ )) => real
+(ComplexImag (ComplexMake _ imag )) => imag
+
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 =>
+ (ComplexMake
+ (Load <typ.Float32> ptr mem)
+ (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 8 =>
+ (Store {typ.Float32}
+ (OffPtr <typ.Float32Ptr> [4] dst)
+ imag
+ (Store {typ.Float32} dst real mem))
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 =>
+ (ComplexMake
+ (Load <typ.Float64> ptr mem)
+ (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 16 =>
+ (Store {typ.Float64}
+ (OffPtr <typ.Float64Ptr> [8] dst)
+ imag
+ (Store {typ.Float64} dst real mem))
+
+// string ops
+(StringPtr (StringMake ptr _)) => ptr
+(StringLen (StringMake _ len)) => len
+
+(Load <t> ptr mem) && t.IsString() =>
+ (StringMake
+ (Load <typ.BytePtr> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (StringMake ptr len) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {typ.BytePtr} dst ptr mem))
+
+// slice ops
+(SlicePtr (SliceMake ptr _ _ )) => ptr
+(SliceLen (SliceMake _ len _)) => len
+(SliceCap (SliceMake _ _ cap)) => cap
+
+(Load <t> ptr mem) && t.IsSlice() =>
+ (SliceMake
+ (Load <t.Elem().PtrTo()> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+ mem))
+(Store {t} dst (SliceMake ptr len cap) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
+ cap
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {t.Elem().PtrTo()} dst ptr mem)))
+
+// interface ops
+(ITab (IMake itab _)) => itab
+(IData (IMake _ data)) => data
+
+(Load <t> ptr mem) && t.IsInterface() =>
+ (IMake
+ (Load <typ.Uintptr> ptr mem)
+ (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (IMake itab data) mem) =>
+ (Store {typ.BytePtr}
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
+ data
+ (Store {typ.Uintptr} dst itab mem))
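
The string rules above encode the usual header layout: a data pointer at offset 0 and an int length at offset config.PtrSize; slices add a capacity word at 2*PtrSize, and interfaces store itab then data. A standalone sketch of the word-by-word reads the string Load rule decomposes into (the unsafe poking is illustrative only, not how the compiler itself accesses memory):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// loadString reads the two machine words the dec.rules Load rule produces
	// for a string: the byte pointer at offset 0, the length one word later.
	func loadString(p unsafe.Pointer) (data unsafe.Pointer, length int) {
		data = *(*unsafe.Pointer)(p)
		length = *(*int)(unsafe.Add(p, unsafe.Sizeof(uintptr(0))))
		return
	}

	func main() {
		s := "hello"
		data, n := loadString(unsafe.Pointer(&s))
		fmt.Printf("%d %c\n", n, *(*byte)(data)) // 5 h
	}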
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
new file mode 100644
index 0000000..9297ed8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -0,0 +1,396 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose [u]int64 types on 32-bit
+// architectures. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+(Int64Hi (Int64Make hi _)) => hi
+(Int64Lo (Int64Make _ lo)) => lo
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && !config.BigEndian =>
+ (Store {hi.Type}
+ (OffPtr <hi.Type.PtrTo()> [4] dst)
+ hi
+ (Store {lo.Type} dst lo mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && config.BigEndian =>
+ (Store {lo.Type}
+ (OffPtr <lo.Type.PtrTo()> [4] dst)
+ lo
+ (Store {hi.Type} dst hi mem))
+
+// These are not enabled during decomposeBuiltin if late call expansion, but they are always enabled for softFloat
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+
+(Add64 x y) =>
+ (Int64Make
+ (Add32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+
+(Sub64 x y) =>
+ (Int64Make
+ (Sub32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+
+(Mul64 x y) =>
+ (Int64Make
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y))
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y))
+ (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+
+(And64 x y) =>
+ (Int64Make
+ (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Or64 x y) =>
+ (Int64Make
+ (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Xor64 x y) =>
+ (Int64Make
+ (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Neg64 <t> x) => (Sub64 (Const64 <t> [0]) x)
+
+(Com64 x) =>
+ (Int64Make
+ (Com32 <typ.UInt32> (Int64Hi x))
+ (Com32 <typ.UInt32> (Int64Lo x)))
+
+// Sadly, just because we know that x is non-zero,
+// we don't know whether either component is,
+// so just treat Ctz64NonZero the same as Ctz64.
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) =>
+ (Add32 <typ.UInt32>
+ (Ctz32 <typ.UInt32> (Int64Lo x))
+ (And32 <typ.UInt32>
+ (Com32 <typ.UInt32> (Zeromask (Int64Lo x)))
+ (Ctz32 <typ.UInt32> (Int64Hi x))))
+
+(BitLen64 x) =>
+ (Add32 <typ.Int>
+ (BitLen32 <typ.Int> (Int64Hi x))
+ (BitLen32 <typ.Int>
+ (Or32 <typ.UInt32>
+ (Int64Lo x)
+ (Zeromask (Int64Hi x)))))
+
+(Bswap64 x) =>
+ (Int64Make
+ (Bswap32 <typ.UInt32> (Int64Lo x))
+ (Bswap32 <typ.UInt32> (Int64Hi x)))
+
+(SignExt32to64 x) => (Int64Make (Signmask x) x)
+(SignExt16to64 x) => (SignExt32to64 (SignExt16to32 x))
+(SignExt8to64 x) => (SignExt32to64 (SignExt8to32 x))
+
+(ZeroExt32to64 x) => (Int64Make (Const32 <typ.UInt32> [0]) x)
+(ZeroExt16to64 x) => (ZeroExt32to64 (ZeroExt16to32 x))
+(ZeroExt8to64 x) => (ZeroExt32to64 (ZeroExt8to32 x))
+
+(Trunc64to32 (Int64Make _ lo)) => lo
+(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo)
+(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo)
+// Most general
+(Trunc64to32 x) => (Int64Lo x)
+(Trunc64to16 x) => (Trunc32to16 (Int64Lo x))
+(Trunc64to8 x) => (Trunc32to8 (Int64Lo x))
+
+(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask x)
+(Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh16x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh16x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt16to32 x))
+(Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh8x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh8x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt8to32 x))
+(Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+
+(Lsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh32x32 [c] x lo)
+(Rsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32x32 [c] x lo)
+(Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32Ux32 [c] x lo)
+(Lsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh16x32 [c] x lo)
+(Rsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16x32 [c] x lo)
+(Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16Ux32 [c] x lo)
+(Lsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh8x32 [c] x lo)
+(Rsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8x32 [c] x lo)
+(Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8Ux32 [c] x lo)
+
+(Lsh64x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+(Rsh64x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+(Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+
+(Lsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh64x32 [c] x lo)
+(Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64x32 [c] x lo)
+(Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64Ux32 [c] x lo)
+
+// turn x64 non-constant shifts to x32 shifts
+// if high 32-bit of the shift is nonzero, make a huge shift
+(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+
+// Most general
+(Lsh64x64 x y) => (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64x64 x y) => (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh32x64 x y) => (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32x64 x y) => (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh16x64 x y) => (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16x64 x y) => (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh8x64 x y) => (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+
+// Clean up constants a little
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
+
+// 64x left shift
+// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0
+// result.lo = lo<<s
+(Lsh64x32 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Lsh32x32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
+ (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x16 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Lsh32x16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
+ (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x8 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Lsh32x8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
+ (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
+
+// 64x unsigned right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0
+(Rsh64Ux32 x s) =>
+ (Int64Make
+ (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+(Rsh64Ux16 x s) =>
+ (Int64Make
+ (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+(Rsh64Ux8 x s) =>
+ (Int64Make
+ (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+
+// 64x signed right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1
+(Rsh64x32 x s) =>
+ (Int64Make
+ (Rsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
+ (Zeromask
+ (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+(Rsh64x16 x s) =>
+ (Int64Make
+ (Rsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
+ (Zeromask
+ (ZeroExt16to32
+ (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+(Rsh64x8 x s) =>
+ (Int64Make
+ (Rsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
+ (Zeromask
+ (ZeroExt8to32
+ (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+
+(Const64 <t> [c]) && t.IsSigned() =>
+ (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+(Const64 <t> [c]) && !t.IsSigned() =>
+ (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+
+(Eq64 x y) =>
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Eq32 (Int64Lo x) (Int64Lo y)))
+
+(Neq64 x y) =>
+ (OrB
+ (Neq32 (Int64Hi x) (Int64Hi y))
+ (Neq32 (Int64Lo x) (Int64Lo y)))
+
+(Less64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
+
+(Less64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
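
On 32-bit targets the Add64 rule above is plain add-with-carry across the two halves, and the 64x32 shift rules implement exactly the hi/lo composition written in the comments. Both are easy to check with ordinary Go on uint32 halves; the sketch below is illustrative (math/bits supplies the carry, and Go's rule that shifting by a count greater than or equal to the width yields 0 plays the role of the ">> is unsigned, large shifts result 0" remark):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// add64 mirrors the Add64 rule: the low halves add with a carry out,
	// which feeds the addition of the high halves.
	func add64(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
		lo, carry := bits.Add32(xlo, ylo, 0)
		hi, _ = bits.Add32(xhi, yhi, carry)
		return
	}

	// shl64 mirrors the Lsh64x32 rule:
	//   result.hi = hi<<s | lo>>(32-s) | lo<<(s-32)
	//   result.lo = lo<<s
	// For any s, exactly the applicable terms survive, because out-of-range
	// (and underflowed) shift counts produce 0 in Go, as in the rules.
	func shl64(hi, lo, s uint32) (rhi, rlo uint32) {
		rhi = hi<<s | lo>>(32-s) | lo<<(s-32)
		rlo = lo << s
		return
	}

	func main() {
		x, y := uint64(0x1F0000000), uint64(0x20000000)
		hi, lo := add64(uint32(x>>32), uint32(x), uint32(y>>32), uint32(y))
		fmt.Printf("%#x\n", uint64(hi)<<32|uint64(lo)) // same as x+y

		hi, lo = shl64(uint32(x>>32), uint32(x), 12)
		fmt.Printf("%#x\n", uint64(hi)<<32|uint64(lo)) // same as x<<12
	}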
diff --git a/src/cmd/compile/internal/ssa/gen/dec64Ops.go b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
new file mode 100644
index 0000000..8c5883b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+var dec64Ops = []opData{}
+
+var dec64Blocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec64",
+ ops: dec64Ops,
+ blocks: dec64Blocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/decArgs.rules b/src/cmd/compile/internal/ssa/gen/decArgs.rules
new file mode 100644
index 0000000..1c9a0bb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/decArgs.rules
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Decompose compound argument values
+// Do this early to simplify tracking names for debugging.
+
+(Arg {n} [off]) && v.Type.IsString() =>
+ (StringMake
+ (Arg <typ.BytePtr> {n} [off])
+ (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
+
+(Arg {n} [off]) && v.Type.IsSlice() =>
+ (SliceMake
+ (Arg <v.Type.Elem().PtrTo()> {n} [off])
+ (Arg <typ.Int> {n} [off+int32(config.PtrSize)])
+ (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
+
+(Arg {n} [off]) && v.Type.IsInterface() =>
+ (IMake
+ (Arg <typ.Uintptr> {n} [off])
+ (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
+
+(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 =>
+ (ComplexMake
+ (Arg <typ.Float64> {n} [off])
+ (Arg <typ.Float64> {n} [off+8]))
+
+(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 =>
+ (ComplexMake
+ (Arg <typ.Float32> {n} [off])
+ (Arg <typ.Float32> {n} [off+4]))
+
+(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
+ (StructMake0)
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
+ (StructMake1
+ (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
+ (StructMake2
+ (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
+ (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
+ (StructMake3
+ (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
+ (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
+ (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
+ (StructMake4
+ (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
+ (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
+ (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))])
+ (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
+
+(Arg <t>) && t.IsArray() && t.NumElem() == 0 =>
+ (ArrayMake0)
+(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
+ (ArrayMake1 (Arg <t.Elem()> {n} [off]))
diff --git a/src/cmd/compile/internal/ssa/gen/decArgsOps.go b/src/cmd/compile/internal/ssa/gen/decArgsOps.go
new file mode 100644
index 0000000..b73d9d3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/decArgsOps.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+var decArgsOps = []opData{}
+
+var decArgsBlocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "decArgs",
+ ops: decArgsOps,
+ blocks: decArgsBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/decOps.go b/src/cmd/compile/internal/ssa/gen/decOps.go
new file mode 100644
index 0000000..b826481
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/decOps.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+var decOps = []opData{}
+
+var decBlocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec",
+ ops: decOps,
+ blocks: decBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
new file mode 100644
index 0000000..1784923
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -0,0 +1,2535 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Simplifications that apply to all backend architectures. As an example, this
+// Go source code
+//
+// y := 0 * x
+//
+// can be translated into y := 0 without losing any information, which saves a
+// pointless multiplication instruction. Other .rules files in this directory
+// (for example AMD64.rules) contain rules specific to the architecture in the
+// filename. The rules here apply to every architecture.
+//
+// The code for parsing this file lives in rulegen.go; this file generates
+// ssa/rewritegeneric.go.
+
+// values are specified using the following format:
+// (op <type> [auxint] {aux} arg0 arg1 ...)
+// the type, aux, and auxint fields are optional
+// on the matching side
+// - the type, aux, and auxint fields must match if they are specified.
+// - the first occurrence of a variable defines that variable. Subsequent
+// uses must match (be == to) the first use.
+// - v is defined to be the value matched.
+// - an additional conditional can be provided after the match pattern with "&&".
+// on the generated side
+// - the type of the top-level expression is the same as the one on the left-hand side.
+// - the type of any subexpressions must be specified explicitly (or
+// be specified in the op's type field).
+// - auxint will be 0 if not specified.
+// - aux will be nil if not specified.
+
+// blocks are specified using the following format:
+// (kind controlvalue succ0 succ1 ...)
+// controlvalue must be "nil" or a value expression
+// succ* fields must be variables
+// For now, the generated successors must be a permutation of the matched successors.
+
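
As a worked example of the format just described, take one of the strength-reduction rules further down in this file:

	(Mul8 <t> n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))

On the match side, Mul8 is the op, <t> binds the value's type, n matches the first argument unconditionally, and (Const8 [c]) matches a constant second argument whose auxint is bound to c; the && clause is the extra Go condition that must hold for the rewrite to fire. On the result side, the top-level Lsh8x64 reuses the matched type t, while the newly created Const64 must state its type (<typ.UInt64>) and auxint (log8(c)) explicitly, since neither is implied by the match.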
+// constant folding
+(Trunc16to8 (Const16 [c])) => (Const8 [int8(c)])
+(Trunc32to8 (Const32 [c])) => (Const8 [int8(c)])
+(Trunc32to16 (Const32 [c])) => (Const16 [int16(c)])
+(Trunc64to8 (Const64 [c])) => (Const8 [int8(c)])
+(Trunc64to16 (Const64 [c])) => (Const16 [int16(c)])
+(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)])
+(Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)])
+(Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)])
+(Cvt32to32F (Const32 [c])) => (Const32F [float32(c)])
+(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
+(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
+(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
+(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Round32F x:(Const32F)) => x
+(Round64F x:(Const64F)) => x
+(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
+(CvtBoolToUint8 (ConstBool [true])) => (Const8 [1])
+
+(Trunc16to8 (ZeroExt8to16 x)) => x
+(Trunc32to8 (ZeroExt8to32 x)) => x
+(Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x)
+(Trunc32to16 (ZeroExt16to32 x)) => x
+(Trunc64to8 (ZeroExt8to64 x)) => x
+(Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x)
+(Trunc64to16 (ZeroExt16to64 x)) => x
+(Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x)
+(Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x)
+(Trunc64to32 (ZeroExt32to64 x)) => x
+(Trunc16to8 (SignExt8to16 x)) => x
+(Trunc32to8 (SignExt8to32 x)) => x
+(Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x)
+(Trunc32to16 (SignExt16to32 x)) => x
+(Trunc64to8 (SignExt8to64 x)) => x
+(Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x)
+(Trunc64to16 (SignExt16to64 x)) => x
+(Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x)
+(Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x)
+(Trunc64to32 (SignExt32to64 x)) => x
+
+(ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))])
+(ZeroExt8to32 (Const8 [c])) => (Const32 [int32( uint8(c))])
+(ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))])
+(ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))])
+(ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))])
+(ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))])
+(SignExt8to16 (Const8 [c])) => (Const16 [int16(c)])
+(SignExt8to32 (Const8 [c])) => (Const32 [int32(c)])
+(SignExt8to64 (Const8 [c])) => (Const64 [int64(c)])
+(SignExt16to32 (Const16 [c])) => (Const32 [int32(c)])
+(SignExt16to64 (Const16 [c])) => (Const64 [int64(c)])
+(SignExt32to64 (Const32 [c])) => (Const64 [int64(c)])
+
+(Neg8 (Const8 [c])) => (Const8 [-c])
+(Neg16 (Const16 [c])) => (Const16 [-c])
+(Neg32 (Const32 [c])) => (Const32 [-c])
+(Neg64 (Const64 [c])) => (Const64 [-c])
+(Neg32F (Const32F [c])) && c != 0 => (Const32F [-c])
+(Neg64F (Const64F [c])) && c != 0 => (Const64F [-c])
+
+(Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d])
+(Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d])
+(Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d])
+(Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
+(Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d])
+(Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d])
+(AddPtr <t> x (Const64 [c])) => (OffPtr <t> x [c])
+(AddPtr <t> x (Const32 [c])) => (OffPtr <t> x [int64(c)])
+
+(Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d])
+(Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d])
+(Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d])
+(Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d])
+(Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d])
+(Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d])
+
+(Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d])
+(Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d])
+(Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d])
+(Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d])
+(Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => (Const32F [c*d])
+(Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d])
+
+(And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d])
+(And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d])
+(And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d])
+(And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d])
+
+(Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d])
+(Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d])
+(Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d])
+(Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d])
+
+(Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d])
+(Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d])
+(Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d])
+(Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))])
+
+(Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d])
+(Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d])
+(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d])
+(Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d])
+(Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))])
+(Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))])
+(Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))])
+(Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d])
+(Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d])
+(Select0 (Div128u (Const64 [0]) lo y)) => (Div64u lo y)
+(Select1 (Div128u (Const64 [0]) lo y)) => (Mod64u lo y)
+
+(Not (ConstBool [c])) => (ConstBool [!c])
+
+// Convert x * 1 to x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x
+
+// Convert x * -1 to -x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x)
+
+// Convert multiplication by a power of two to a shift.
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo16(c) => (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo32(c) => (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo64(c) => (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo8(-c) => (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo16(-c) => (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo32(-c) => (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo64(-c) => (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
+
+(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d])
+(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d])
+(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d])
+(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d])
+
+(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))])
+(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))])
+(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))])
+(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))])
+
+(Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)])
+(Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)])
+(Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))])
+(Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)])
+(Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)])
+(Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> uint64(d))])
+(Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)])
+(Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)])
+(Rsh16Ux64 (Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))])
+(Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)])
+(Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)])
+(Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))])
+
+// Fold IsInBounds when the range of the index cannot exceed the limit.
+(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds x x) => (ConstBool [false])
+(IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d])
+(IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d])
+// (Mod(32|64)u x y) is always between 0 (inclusive) and y (exclusive).
+(IsInBounds (Mod32u _ y) y) => (ConstBool [true])
+(IsInBounds (Mod64u _ y) y) => (ConstBool [true])
+// Right shifting an unsigned number limits its value.
+(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1<<uint(64-c)-1 < d => (ConstBool [true])
+
+(IsSliceInBounds x x) => (ConstBool [true])
+(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true])
+
+(Eq(64|32|16|8) x x) => (ConstBool [true])
+(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d])
+(EqB (ConstBool [false]) x) => (Not x)
+(EqB (ConstBool [true]) x) => x
+
+(Neq(64|32|16|8) x x) => (ConstBool [false])
+(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d])
+(NeqB (ConstBool [false]) x) => x
+(NeqB (ConstBool [true]) x) => (Not x)
+(NeqB (Not x) (Not y)) => (NeqB x y)
+
+(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Eq64 (Const64 <t> [c-d]) x)
+(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Eq32 (Const32 <t> [c-d]) x)
+(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Eq16 (Const16 <t> [c-d]) x)
+(Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Eq8 (Const8 <t> [c-d]) x)
+
+(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Neq64 (Const64 <t> [c-d]) x)
+(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Neq32 (Const32 <t> [c-d]) x)
+(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
+(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)
+
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
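+
+// A rough Go sketch of the transformation (illustrative only, not one of the rules):
+// for int64 constants with d >= c, the fused form checks the same predicate with a
+// single unsigned comparison.
+//
+//	func inRange(x, c, d int64) bool      { return c <= x && x < d }           // two signed compares
+//	func inRangeFused(x, c, d int64) bool { return uint64(x-c) < uint64(d-c) } // one unsigned compare, assumes d >= c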
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+
+// Canonicalize x-const to x+(-const)
+(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 => (Add64 (Const64 <t> [-c]) x)
+(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 => (Add32 (Const32 <t> [-c]) x)
+(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 => (Add16 (Const16 <t> [-c]) x)
+(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 => (Add8 (Const8 <t> [-c]) x)
+
+// fold negation into comparison operators
+(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y)
+(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y)
+
+(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x)
+(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x)
+(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x)
+(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x)
+
+// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
+// a[i].b = ...; a[i+1].b = ...
+(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) =>
+ (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) =>
+ (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+
+// Rewrite x*y ± x*z to x*(y±z)
+(Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z))
+(Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z))
+
+// Rewrite shifts by 8/16/32-bit constant shift counts into shifts by 64-bit constants,
+// to reduce the number of other rewrite rules needed for constant shifts.
+(Lsh64x32 <t> x (Const32 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh64x16 <t> x (Const16 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh64x8 <t> x (Const8 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64x32 <t> x (Const32 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64x16 <t> x (Const16 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64x8 <t> x (Const8 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64Ux32 <t> x (Const32 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64Ux16 <t> x (Const16 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64Ux8 <t> x (Const8 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh32x32 <t> x (Const32 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh32x16 <t> x (Const16 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh32x8 <t> x (Const8 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32x32 <t> x (Const32 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32x16 <t> x (Const16 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32x8 <t> x (Const8 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32Ux32 <t> x (Const32 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32Ux16 <t> x (Const16 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32Ux8 <t> x (Const8 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh16x32 <t> x (Const32 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh16x16 <t> x (Const16 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh16x8 <t> x (Const8 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16x32 <t> x (Const32 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16x16 <t> x (Const16 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16x8 <t> x (Const8 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16Ux32 <t> x (Const32 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16Ux16 <t> x (Const16 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16Ux8 <t> x (Const8 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh8x32 <t> x (Const32 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh8x16 <t> x (Const16 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh8x8 <t> x (Const8 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8x32 <t> x (Const32 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8x16 <t> x (Const16 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8x8 <t> x (Const8 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8Ux32 <t> x (Const32 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8Ux16 <t> x (Const16 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8Ux8 <t> x (Const8 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+// shifts by zero
+(Lsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x
+
+// Rotates by a multiple of the operand width are no-ops.
+(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x
+(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x
+(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x
+(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x
+
+// zero shifted
+(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+
+// Large left shifts of any value, and large right shifts of unsigned values, produce zero.
+((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
+((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// combine const shifts
+(Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 <t> [c+d]))
+(Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 <t> [c+d]))
+(Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 <t> [c+d]))
+(Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 <t> [c+d]))
+(Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 <t> [c+d]))
+(Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 <t> [c+d]))
+(Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 <t> [c+d]))
+(Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 <t> [c+d]))
+(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 <t> [c+d]))
+(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 <t> [c+d]))
+
+// Remove signed right shift before an unsigned right shift that extracts the sign bit.
+(Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] )) => (Rsh8Ux64 x (Const64 <t> [7] ))
+(Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15])) => (Rsh16Ux64 x (Const64 <t> [15]))
+(Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31])) => (Rsh32Ux64 x (Const64 <t> [31]))
+(Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63])) => (Rsh64Ux64 x (Const64 <t> [63]))
+
+// ((x >> c1) << c2) >> c3
+(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Rsh(64|32|16|8)Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// ((x << c1) >> c2) << c3
+(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Lsh(64|32|16|8)x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// (x >> c) & uppermask = 0
+(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= int64(64-ntz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0])
+
+// (x << c) & lowermask = 0
+(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0])
+
+// replace shifts with zero extensions
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+
+// replace shifts with sign extensions
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 <typ.Int32> x))
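+
+// Illustrative Go sketch (not one of the rules): shifting up and back down keeps only
+// the low bits, which is exactly an extension of the truncated value, e.g. for 32 bits:
+//
+//	func low8u(x uint32) uint32 { return (x << 24) >> 24 } // == uint32(uint8(x)), zero extension
+//	func low8s(x int32) int32   { return (x << 24) >> 24 } // == int32(int8(x)), sign extension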
+
+// constant comparisons
+(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d])
+(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d])
+(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d])
+(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d])
+
+(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)])
+(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)])
+(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)])
+(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)])
+
+(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)])
+(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)])
+(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)])
+(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)])
+
+(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true])
+
+(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+
+// constant floating point comparisons
+(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d])
+(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d])
+(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d])
+(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d])
+(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d])
+(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d])
+(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d])
+(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d])
+
+// simplifications
+(Or(64|32|16|8) x x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
+
+(And(64|32|16|8) x x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+
+(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+
+(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+
+(Com(64|32|16|8) (Com(64|32|16|8) x)) => x
+(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])
+
+(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
+
+// ^(x-1) == ^x+1 == -x
+(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
+(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)
+
+// -(-x) == x
+(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x
+
+// -^x == x+1
+(Neg(64|32|16|8) <t> (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) <t> [1]) x)
+
+(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y)
+(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y)
+(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y
+
+// Unsigned comparisons to zero.
+(Less(64U|32U|16U|8U) _ (Const(64|32|16|8) [0])) => (ConstBool [false])
+(Leq(64U|32U|16U|8U) (Const(64|32|16|8) [0]) _) => (ConstBool [true])
+
+// Ands clear bits. Ors set bits.
+// If a subsequent Or will set all the bits
+// that an And cleared, we can skip the And.
+// This happens in bitmasking code like:
+// x &^= 3 << shift // clear two old bits
+// x |= v << shift // set two new bits
+// when shift is a small constant and v ends up a constant 3.
+(Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 <t> [c1]) x)
+(Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 <t> [c1]) x)
+(Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 <t> [c1]) x)
+(Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 <t> [c1]) x)
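+
+// Worked example in Go (illustrative only): with shift == 2 and v == 3, the And clears
+// bits 2-3 and the Or sets them again, so dropping the And leaves the result unchanged.
+//
+//	x := uint8(0b1111_0110)
+//	_ = (x &^ (3 << 2)) | (3 << 2) // 0b1111_1110
+//	_ = x | (3 << 2)               // 0b1111_1110, same value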
+
+(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x)
+(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x)
+(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x)
+(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x)
+(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x)
+(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x)
+
+(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x
+(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x
+(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x
+(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x
+(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x
+(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x
+
+(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x
+(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x
+(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x
+(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x
+(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x
+(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x
+
+(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1])
+(Slicemask (Const32 [0])) => (Const32 [0])
+(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1])
+(Slicemask (Const64 [0])) => (Const64 [0])
+
+// simplifications often used for lengths. e.g. len(s[i:i+5])==5
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x
+
+// basic phi simplifications
+(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c])
+(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c])
+(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c])
+(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c])
+
+// slice and interface comparisons
+// The frontend ensures that we can only compare against nil,
+// so we need only compare the first word (interface type or slice ptr).
+(EqInter x y) => (EqPtr (ITab x) (ITab y))
+(NeqInter x y) => (NeqPtr (ITab x) (ITab y))
+(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y))
+(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y))
+
+// Load of store of same address, with compatibly typed value and same size
+(Load <t1> p1 (Store {t2} p2 x _))
+ && isSamePtr(p1, p2)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ && isSamePtr(p1, p3)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p3, t3.Size(), p2, t2.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ && isSamePtr(p1, p4)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p4, t4.Size(), p2, t2.Size())
+ && disjoint(p4, t4.Size(), p3, t3.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ && isSamePtr(p1, p5)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p5, t5.Size(), p2, t2.Size())
+ && disjoint(p5, t5.Size(), p3, t3.Size())
+ && disjoint(p5, t5.Size(), p4, t4.Size())
+ => x
+
+// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
+
+// Float Loads up past intervening (disjoint) Stores to the Zero op so they can be constant folded.
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ mem:(Zero [n] p3 _)))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ mem:(Zero [n] p4 _))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ mem:(Zero [n] p5 _)))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ (Store {t5} p5 _
+ mem:(Zero [n] p6 _))))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ && disjoint(op, t1.Size(), p5, t5.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+
+// Zero to Load forwarding.
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && t1.IsBoolean()
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (ConstBool [false])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is8BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (Const8 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is16BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 2
+ => (Const16 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32F [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64F [0])
+
+// Eliminate stores of values that have just been loaded from the same location.
+// We also handle the common case where there are some intermediate stores.
+(Store {t1} p1 (Load <t2> p2 mem) mem)
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ && disjoint(p1, t1.Size(), p5, t5.Size())
+ => mem
+
+// Don't Store zeros to cleared variables.
+(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ && isConstZero(x)
+ && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => mem
+
+// Collapse OffPtr
+(OffPtr (OffPtr p [b]) [a]) => (OffPtr p [a+b])
+(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p
+
+// indexing operations
+// Note: bounds check has already been done
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+
+// struct operations
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
+ (StructMake0)
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
+ (StructMake1
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
+ (StructMake2
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
+ (StructMake3
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
+ (StructMake4
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
+ (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+
+(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(Store _ (StructMake0) mem) => mem
+(Store dst (StructMake1 <t> f0) mem) =>
+ (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+ (Store {t.FieldType(3)}
+ (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+ f3
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))))
+
+// Putting struct{*byte} and similar into direct interfaces.
+(IMake typ (StructMake1 val)) => (IMake typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+// un-SSAable values use mem->mem copies
+(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t) =>
+ (Move {t} [t.Size()] dst src mem)
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t) =>
+ (Move {t} [t.Size()] dst src (VarDef {x} mem))
+
+// array ops
+(ArraySelect (ArrayMake1 x)) => x
+
+(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 =>
+ (ArrayMake0)
+
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
+ (ArrayMake1 (Load <t.Elem()> ptr mem))
+
+(Store _ (ArrayMake0) mem) => mem
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// Putting [1]*byte and similar into direct interfaces.
+(IMake typ (ArrayMake1 val)) => (IMake typ val)
+(ArraySelect [0] (IData x)) => (IData x)
+
+// string ops
+// Decomposing StringMake and lowering StringPtr and StringLen
+// happen in a later pass (dec), so that these operations remain available
+// to other passes for optimization.
+(StringPtr (StringMake (Addr <t> {s} base) _)) => (Addr <t> {s} base)
+(StringLen (StringMake _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(ConstString {str}) && config.PtrSize == 4 && str == "" =>
+ (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 8 && str == "" =>
+ (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 4 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const32 <typ.Int> [int32(len(str))]))
+(ConstString {str}) && config.PtrSize == 8 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const64 <typ.Int> [int64(len(str))]))
+
+// slice ops
+// Only a few slice rules are provided here. See dec.rules for
+// a more comprehensive set.
+(SliceLen (SliceMake _ (Const64 <t> [c]) _)) => (Const64 <t> [c])
+(SliceCap (SliceMake _ _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(SliceLen (SliceMake _ (Const32 <t> [c]) _)) => (Const32 <t> [c])
+(SliceCap (SliceMake _ _ (Const32 <t> [c]))) => (Const32 <t> [c])
+(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x)
+(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x)
+(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x)
+(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x)
+(ConstSlice) && config.PtrSize == 4 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const32 <typ.Int> [0])
+ (Const32 <typ.Int> [0]))
+(ConstSlice) && config.PtrSize == 8 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const64 <typ.Int> [0])
+ (Const64 <typ.Int> [0]))
+
+// interface ops
+(ConstInterface) =>
+ (IMake
+ (ConstNil <typ.Uintptr>)
+ (ConstNil <typ.BytePtr>))
+
+(NilCheck (GetG mem) mem) => mem
+
+(If (Not cond) yes no) => (If cond no yes)
+(If (ConstBool [c]) yes no) && c => (First yes no)
+(If (ConstBool [c]) yes no) && !c => (First no yes)
+
+// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
+(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off)
+(Convert (Convert ptr mem) mem) => ptr
+
+// strength reduction of divide by a constant.
+// See ../magic.go for a detailed description of these algorithms.
+
+// Unsigned divide by power of 2. Strength reduce to a shift.
+(Div8u n (Const8 [c])) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+
+// Signed non-negative divide by power of 2.
+(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0])
+
+// Unsigned divide, not a power of 2. Strength reduce to a multiply.
+// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
+(Div8u x (Const8 [c])) && umagicOK8(c) =>
+ (Trunc32to8
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)])
+ (ZeroExt8to32 x))
+ (Const64 <typ.UInt64> [8+umagic8(c).s])))
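+
+// Worked example in Go (illustrative only): for c == 3 the multiplier is the 9-bit
+// constant 1<<8 + 86 == 342 and the shift is 8+2; these constants are worked out by
+// hand here, normally they come from umagic8 in magic.go.
+//
+//	func div3(x uint8) uint8 { return uint8((uint32(x) * 342) >> 10) } // == x / 3 for all x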
+
+// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 =>
+ (Trunc64to16
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)])
+ (ZeroExt16to64 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s])))
+
+// For 16-bit divides on 32-bit machines
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)])
+ (ZeroExt16to32 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16]))
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic16(c).m)])
+ (ZeroExt16to32 x)))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+
+// For 32-bit divides on 32-bit machines
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic32(c).s-2]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ x
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic32(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+
+// For 32-bit divides on 64-bit machines,
+// we'll use a regular (non-hi) multiply for this case.
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)])
+ (ZeroExt32to64 x))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt32> [int64(umagic32(c).m)])
+ (ZeroExt32to64 x)))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+
+// For unsigned 64-bit divides on 32-bit machines,
+// if the constant fits in 16 bits (so that the last term
+// fits in 32 bits), convert to three 32-bit divides by a constant.
+//
+// If 1<<32 = Q * c + R
+// and x = hi << 32 + lo
+//
+// Then x = (hi/c*c + hi%c) << 32 + lo
+// = hi/c*c<<32 + hi%c<<32 + lo
+// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c
+// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c)
+// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c
+(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul =>
+ (Add64
+ (Add64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Lsh64x64 <typ.UInt64>
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [32]))
+ (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))))
+ (Mul64 <typ.UInt64>
+ (ZeroExt32to64 <typ.UInt64>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [int64((1<<32)/c)])))
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))
+ (Mul32 <typ.UInt32>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)]))
+ (Const32 <typ.UInt32> [int32((1<<32)%c)])))
+ (Const32 <typ.UInt32> [int32(c)]))))
+
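+// A rough Go sketch of the identity above (illustrative only; assumes 1 < c <= 0xFFFF,
+// so the last term fits in 32 bits, with Q and R the constants the compiler bakes in):
+//
+//	func div64BySmall(x uint64, c uint32) uint64 {
+//		Q := uint32((1 << 32) / uint64(c))
+//		R := uint32((1 << 32) % uint64(c))
+//		hi, lo := uint32(x>>32), uint32(x)
+//		return uint64(hi/c)<<32 + uint64(hi%c)*uint64(Q) + uint64(lo/c) + uint64((hi%c*R+lo%c)/c)
+//	}
+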
+// For 64-bit divides on 64-bit machines
+// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic64(c).s-2]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ x
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(umagic64(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+
+// Signed divide by a negative constant. Rewrite to divide by a positive constant.
+(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+(Div16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+(Div32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+(Div64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+
+// Dividing by the most-negative number. Result is always 0 except
+// if the input is also the most-negative number.
+// We can detect that using the sign bit of x & -x.
+(Div8 <t> x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+(Div32 <t> x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+(Div64 <t> x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
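+
+// Illustrative Go sketch (not one of the rules): x & -x isolates the lowest set bit of x,
+// and that bit can be the sign bit only when x is itself the most-negative value.
+//
+//	func divByMinInt64(x int64) int64 {
+//		return int64(uint64(x&-x) >> 63) // 1 iff x == math.MinInt64, which equals x / math.MinInt64
+//	}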
+
+// Signed divide by power of 2.
+// n / c = n >> log(c) if n >= 0
+// = (n+c-1) >> log(c) if n < 0
+// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
+(Div8 <t> n (Const8 [c])) && isPowerOfTwo8(c) =>
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))])))
+ (Const64 <typ.UInt64> [int64(log8(c))]))
+(Div16 <t> n (Const16 [c])) && isPowerOfTwo16(c) =>
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))])))
+ (Const64 <typ.UInt64> [int64(log16(c))]))
+(Div32 <t> n (Const32 [c])) && isPowerOfTwo32(c) =>
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))])))
+ (Const64 <typ.UInt64> [int64(log32(c))]))
+(Div64 <t> n (Const64 [c])) && isPowerOfTwo64(c) =>
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))])))
+ (Const64 <typ.UInt64> [int64(log64(c))]))
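+
+// Worked example in Go (illustrative only): for int64 and c == 8 (log(c) == 3), the bias
+// added for negative n is n>>63 (all ones when n < 0) shifted down to its low 3 bits.
+//
+//	func div8(n int64) int64 {
+//		bias := int64(uint64(n>>63) >> (64 - 3)) // 7 if n < 0, 0 otherwise
+//		return (n + bias) >> 3                   // == n / 8, truncating toward zero
+//	}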
+
+// Signed divide, not a power of 2. Strength reduce to a multiply.
+(Div8 <t> x (Const8 [c])) && smagicOK8(c) =>
+ (Sub8 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic8(c).m)])
+ (SignExt8to32 x))
+ (Const64 <typ.UInt64> [8+smagic8(c).s]))
+ (Rsh32x64 <t>
+ (SignExt8to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div16 <t> x (Const16 [c])) && smagicOK16(c) =>
+ (Sub16 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic16(c).m)])
+ (SignExt16to32 x))
+ (Const64 <typ.UInt64> [16+smagic16(c).s]))
+ (Rsh32x64 <t>
+ (SignExt16to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 =>
+ (Sub32 <t>
+ (Rsh64x64 <t>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(smagic32(c).m)])
+ (SignExt32to64 x))
+ (Const64 <typ.UInt64> [32+smagic32(c).s]))
+ (Rsh64x64 <t>
+ (SignExt32to64 x)
+ (Const64 <typ.UInt64> [63])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s-1]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Add32 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s-1]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Add64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+
+// Unsigned mod by power of 2 constant.
+(Mod8u <t> n (Const8 [c])) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16u <t> n (Const16 [c])) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32u <t> n (Const32 [c])) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64u <t> n (Const64 [c])) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64u <t> n (Const64 [-1<<63])) => (And64 n (Const64 <t> [1<<63-1]))
+
+// Signed non-negative mod by power of 2 constant.
+(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16 <t> n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32 <t> n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64 <t> n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n
+
+// Signed mod by negative constant.
+(Mod8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 <t> n (Const8 <t> [-c]))
+(Mod16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 <t> n (Const16 <t> [-c]))
+(Mod32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 <t> n (Const32 <t> [-c]))
+(Mod64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 <t> n (Const64 <t> [-c]))
+
+// For all other mods by a constant, rewrite A%B as A-(A/B*B).
+// This implements % with two multiplies (the A/B is itself strength-reduced
+// to a multiply by the rules above) and a bunch of ancillary ops.
+// One of the multiplies is free if the user's code also computes A/B.
+(Mod8 <t> x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ => (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16 <t> x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ => (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32 <t> x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ => (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ => (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+(Mod8u <t> x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c)
+ => (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16u <t> x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ => (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32u <t> x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ => (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ => (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
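+
+// Illustrative Go sketch (not one of the rules), for a uint32 mod 10:
+//
+//	func mod10(x uint32) uint32 { return x - (x/10)*10 } // the x/10 is strength-reduced by the rules above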
+
+// For architectures without rotates on less than 32-bits, promote these checks to 32-bit.
+(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+
+// Divisibility checks x%c == 0 convert to multiply and rotate.
+// Note that x%c == 0 is rewritten as x == c*(x/c) during the opt pass
+// where (x/c) is performed using multiplication with magic constants.
+// To rewrite x%c == 0 requires pattern matching the rewritten expression
+// and checking that the division by the same constant wasn't already calculated.
+// This check is made by counting uses of the magic constant multiplication.
+// Note that if there were an intermediate opt pass, this rule could be applied
+// directly on the Div op and magic division rewrites could be delayed to late opt.
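+//
+// Illustrative sketch (not part of the rule set): the right-hand sides below
+// are the usual multiply-by-modular-inverse divisibility test. For an odd
+// divisor (k == 0) it amounts to a plain multiply and compare; constants shown
+// here for divisor 5, names hypothetical:
+//
+//	func divisibleBy5(x uint32) bool {
+//		const m = 0xcccccccd       // modular inverse: 5*m == 1 (mod 1<<32)
+//		const max = ^uint32(0) / 5 // (1<<32 - 1) / 5
+//		return x*m <= max
+//	}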
+
+// Unsigned divisibility checks convert to multiply and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Trunc32to8
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt8to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s
+ && x.Op != OpConst8 && udivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(udivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(udivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc64to16
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt16to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ (Avg32u
+ (Lsh32x64 (ZeroExt16to32 x) (Const64 [16]))
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 <typ.UInt32> [m])
+ (Rsh32Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ (Avg32u
+ x
+ mul:(Hmul32u
+ (Const32 [m])
+ x))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic32(c).m) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ (Avg64u
+ (Lsh64x64 (ZeroExt32to64 x) (Const64 [32]))
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ (Rsh64Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ (Avg64u
+ x
+ mul:(Hmul64u
+ (Const64 [m])
+ x))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic64(c).m) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+
+// Signed divisibility checks convert to multiply, add and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Sub8
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt8to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt8to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic8(c).m) && s == 8+smagic8(c).s
+ && x.Op != OpConst8 && sdivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Add8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).a)])
+ )
+ (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Sub16
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt16to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt16to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic16(c).m) && s == 16+smagic16(c).s
+ && x.Op != OpConst16 && sdivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Add16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).a)])
+ )
+ (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh64x64
+ mul:(Mul64
+ (Const64 [m])
+ (SignExt32to64 x))
+ (Const64 [s]))
+ (Rsh64x64
+ (SignExt32to64 x)
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic32(c).m) && s == 32+smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ (Add32
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m) && s == smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ (Add64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m) && s == smagic64(c).s
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+// Divisibility checks for signed integers by a power-of-two constant reduce to a simple mask.
+// However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c)
+// where n/c contains fixup code to handle signed n.
+((Eq8|Neq8) n (Lsh8x64
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 7 && kbar == 8 - k
+ => ((Eq8|Neq8) (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+
+((Eq16|Neq16) n (Lsh16x64
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 15 && kbar == 16 - k
+ => ((Eq16|Neq16) (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+
+((Eq32|Neq32) n (Lsh32x64
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 31 && kbar == 32 - k
+ => ((Eq32|Neq32) (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+
+((Eq64|Neq64) n (Lsh64x64
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 63 && kbar == 64 - k
+ => ((Eq64|Neq64) (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
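+
+// Illustrative sketch (not part of the rule set): once the signed-division
+// fixup is matched away, the divisibility test is a mask of the low k bits,
+// valid for negative values too (function name is hypothetical):
+//
+//	func multipleOf8(n int32) bool {
+//		return n%8 == 0 // same as n&7 == 0
+//	}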
+
+(Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y)
+(Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y)
+
+// Optimize bitsets
+(Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+(Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
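+
+// Illustrative sketch (not part of the rule set): with a single-bit constant,
+// "all bits present" and "any bit present" coincide, so either comparison may
+// be used, whichever is cheaper to generate (names are hypothetical):
+//
+//	const flag = uint32(1) << 4
+//
+//	func hasFlag(x uint32) bool {
+//		return x&flag == flag // equivalent to x&flag != 0
+//	}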
+
+// Reassociate expressions involving constants so that the constants come
+// first: rewrite (op (op y C) x) to (op C (op x y)) or similar, where C is
+// constant. This pushes constants to the outside of the expression, where
+// any constant-folding opportunities become obvious.
+// Note: don't include AddPtr here! In order to maintain the
+// invariant that pointers must stay within the pointed-to object,
+// we can't pull part of a pointer computation above the AddPtr.
+// See issue 37881.
+// Note: we don't need to handle any (x-C) cases because we already rewrite
+// (x-C) to (x+(-C)).
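+//
+// Illustrative sketch (not part of the rule set): once pushed outward, the
+// constants meet and fold, e.g. (function name is hypothetical):
+//
+//	func f(x, y int64) int64 {
+//		return (y + 10) + (x + 5) // the adds reassociate so 10 and 5 can fold to 15
+//	}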
+
+// x + (C + z) -> C + (x + z)
+(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
+(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x))
+(Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x))
+(Add8 (Add8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 <t> z x))
+
+// x + (C - z) -> C + (x - z)
+(Add64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z))
+(Add32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z))
+(Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z))
+(Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z))
+
+// x - (C - z) -> x + (z - C) -> (x + z) - C
+(Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
+(Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
+(Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
+(Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
+
+// x - (z + C) -> x + (-z - C) -> (x - z) - C
+(Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i)
+(Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i)
+(Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i)
+(Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i)
+
+// (C - z) - x -> C - (z + x)
+(Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x))
+(Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x))
+(Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x))
+(Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x))
+
+// (z + C) - x -> C + (z - x)
+(Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x))
+(Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x))
+(Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x))
+(Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x))
+
+// x & (C & z) -> C & (x & z)
+(And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 <t> z x))
+(And32 (And32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 <t> z x))
+(And16 (And16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 <t> z x))
+(And8 (And8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 <t> z x))
+
+// x | (C | z) -> C | (x | z)
+(Or64 (Or64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 <t> z x))
+(Or32 (Or32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Or32 i (Or32 <t> z x))
+(Or16 (Or16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 <t> z x))
+(Or8 (Or8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 <t> z x))
+
+// x ^ (C ^ z) -> C ^ (x ^ z)
+(Xor64 (Xor64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 <t> z x))
+(Xor32 (Xor32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 <t> z x))
+(Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x))
+(Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x))
+
+// x * (C * z) -> C * (x * z)
+(Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z))
+(Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z))
+(Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z))
+(Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z))
+
+// C + (D + x) -> (C + D) + x
+(Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c+d]) x)
+
+// C + (D - x) -> (C + D) - x
+(Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x)
+
+// C - (D - x) -> (C - D) + x
+(Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x)
+
+// C - (D + x) -> (C - D) - x
+(Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x)
+
+// C & (D & x) -> (C & D) & x
+(And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x)
+(And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x)
+(And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x)) => (And16 (Const16 <t> [c&d]) x)
+(And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x)) => (And8 (Const8 <t> [c&d]) x)
+
+// C | (D | x) -> (C | D) | x
+(Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x)) => (Or64 (Const64 <t> [c|d]) x)
+(Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x)) => (Or32 (Const32 <t> [c|d]) x)
+(Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x)) => (Or16 (Const16 <t> [c|d]) x)
+(Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x)) => (Or8 (Const8 <t> [c|d]) x)
+
+// C ^ (D ^ x) -> (C ^ D) ^ x
+(Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x)) => (Xor64 (Const64 <t> [c^d]) x)
+(Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x)) => (Xor32 (Const32 <t> [c^d]) x)
+(Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x)) => (Xor16 (Const16 <t> [c^d]) x)
+(Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x)) => (Xor8 (Const8 <t> [c^d]) x)
+
+// C * (D * x) -> (C * D) * x
+(Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x)) => (Mul64 (Const64 <t> [c*d]) x)
+(Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x)) => (Mul32 (Const32 <t> [c*d]) x)
+(Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x)) => (Mul16 (Const16 <t> [c*d]) x)
+(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) => (Mul8 (Const8 <t> [c*d]) x)
+
+// floating point optimizations
+(Mul(32|64)F x (Const(32|64)F [1])) => x
+(Mul32F x (Const32F [-1])) => (Neg32F x)
+(Mul64F x (Const64F [-1])) => (Neg64F x)
+(Mul32F x (Const32F [2])) => (Add32F x x)
+(Mul64F x (Const64F [2])) => (Add64F x x)
+
+(Div32F x (Const32F <t> [c])) && reciprocalExact32(c) => (Mul32F x (Const32F <t> [1/c]))
+(Div64F x (Const64F <t> [c])) && reciprocalExact64(c) => (Mul64F x (Const64F <t> [1/c]))
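+
+// Illustrative sketch (not part of the rule set): the reciprocal rewrite fires
+// only when 1/c is exactly representable (function names are hypothetical):
+//
+//	func quarter(x float64) float64 { return x / 4 }  // rewritten to x * 0.25
+//	func tenth(x float64) float64   { return x / 10 } // stays a division: 1/10 is inexact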
+
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)])
+
+// recognize runtime.newobject and don't Zero/Nilcheck it
+(Zero (Load (OffPtr [c] (SP)) mem) mem)
+ && mem.Op == OpStaticCall
+ && isSameCall(mem.Aux, "runtime.newobject")
+ && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
+ => mem
+(Store (Load (OffPtr [c] (SP)) mem) x mem)
+ && isConstZero(x)
+ && mem.Op == OpStaticCall
+ && isSameCall(mem.Aux, "runtime.newobject")
+ && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
+ => mem
+(Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem)
+ && isConstZero(x)
+ && mem.Op == OpStaticCall
+ && isSameCall(mem.Aux, "runtime.newobject")
+ && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
+ => mem
+// nil checks just need to be rewritten to something useless;
+// they will be dead-code eliminated soon afterwards.
+(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
+ && isSameCall(sym, "runtime.newobject")
+ && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
+ && isSameCall(sym, "runtime.newobject")
+ && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
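+
+// Illustrative sketch (not part of the rule set): runtime.newobject returns
+// zeroed, non-nil memory, so in code such as (hypothetical example)
+//
+//	p := new([8]int64)
+//	p[0] = 0
+//
+// both the store of zero and the nil check on p can be dropped.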
+
+// for rewriting results of some late-expanded rewrites (below)
+(SelectN [0] (MakeResult a ___)) => a
+(SelectN [1] (MakeResult a b ___)) => b
+(SelectN [2] (MakeResult a b c ___)) => c
+
+// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
+(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+
+(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+
+// for late-expanded calls, recognize memequal applied to a single constant byte
+// TODO: figure out the breakeven number of bytes for this optimization.
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+
+// Evaluate constant address comparisons.
+(EqPtr x x) => (ConstBool [true])
+(NeqPtr x x) => (ConstBool [false])
+(EqPtr (Addr {a} _) (Addr {b} _)) => (ConstBool [a == b])
+(EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) => (ConstBool [a == b && o == 0])
+(EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) => (ConstBool [a == b && o1 == o2])
+(NeqPtr (Addr {a} _) (Addr {b} _)) => (ConstBool [a != b])
+(NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _))) => (ConstBool [a != b || o != 0])
+(NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _))) => (ConstBool [a != b || o1 != o2])
+(EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) => (ConstBool [a == b])
+(EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) => (ConstBool [a == b && o == 0])
+(EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) => (ConstBool [a == b && o1 == o2])
+(NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _)) => (ConstBool [a != b])
+(NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _))) => (ConstBool [a != b || o != 0])
+(NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _))) => (ConstBool [a != b || o1 != o2])
+(EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0])
+(NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0])
+(EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2])
+(NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2])
+(EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d])
+(NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d])
+
+(EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false])
+(EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [false])
+(NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true])
+(NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true])
+
+// Simplify address comparisons.
+(EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1))
+(NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1)
+(EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p))
+(NeqPtr (Const(32|64) [0]) p) => (IsNonNil p)
+(EqPtr (ConstNil) p) => (Not (IsNonNil p))
+(NeqPtr (ConstNil) p) => (IsNonNil p)
+
+// Evaluate constant user nil checks.
+(IsNonNil (ConstNil)) => (ConstBool [false])
+(IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0])
+(IsNonNil (Addr _)) => (ConstBool [true])
+(IsNonNil (LocalAddr _ _)) => (ConstBool [true])
+
+// Inline small or disjoint runtime.memmove calls with constant length.
+// See the comment in op Move in genericOps.go for discussion of the type.
+(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && t.IsPtr() // avoids TUINTPTR, see issue 30061
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(s1, s2, s3)
+ => (Move {t.Elem()} [int64(sz)] dst src mem)
+
+// Inline small or disjoint runtime.memmove calls with constant length.
+// See the comment in op Move in genericOps.go for discussion of the type.
+(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && dst.Type.IsPtr() // avoids TUINTPTR, see issue 30061
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
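+
+// Illustrative sketch (not part of the rule set): a copy of small, constant
+// size is a typical source of such a memmove call (function name is
+// hypothetical):
+//
+//	func cp16(dst, src *[16]byte) {
+//		copy(dst[:], src[:]) // may be turned into a single 16-byte Move
+//	}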
+
+// De-virtualize interface calls into static calls.
+// Note that (ITab (IMake)) doesn't get
+// rewritten until after the first opt pass,
+// so this rule should trigger reliably.
+(InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil =>
+ (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
+
+// De-virtualize late-expanded interface calls into late-expanded static calls.
+// Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass,
+// so this rule should trigger reliably.
+// devirtLECall removes the first argument, adds the devirtualized symbol to the AuxCall, and changes the opcode.
+(InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___) && devirtLESym(v, auxCall, itab, off) !=
+ nil => devirtLECall(v, devirtLESym(v, auxCall, itab, off))
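+
+// Illustrative sketch (not part of the rule set): the pattern arises when the
+// concrete type stored in an interface is visible in the same function, e.g.
+// (assuming bytes and io from the standard library):
+//
+//	buf := new(bytes.Buffer)
+//	var w io.Writer = buf
+//	w.Write([]byte("x")) // may be devirtualized into a direct (*bytes.Buffer).Write call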
+
+// Move and Zero optimizations.
+// Move source and destination may overlap.
+
+// Convert Moves into Zeros when the source is known to be zeros.
+(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem)
+
+// Don't Store to variables that are about to be overwritten by Move/Zero.
+(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ && isSamePtr(p1, p2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && clobber(store)
+ => (Zero {t1} [n] p1 mem)
+(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ && isSamePtr(dst1, dst2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && disjoint(src1, n, op, t2.Size())
+ && clobber(store)
+ => (Move {t1} [n] dst1 src1 mem)
+
+// Don't Move to variables that are immediately completely overwritten.
+(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move)
+ => (Move {t} [n] dst1 src1 mem)
+(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move, vardef)
+ => (Zero {t} [n] dst1 (VarDef {x} mem))
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Move [n] p3 _ mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Move [n] p4 _ mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Move [n] p5 _ mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Zero variables that are immediately completely overwritten
+// before being accessed.
+(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ && zero.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero)
+ => (Move {t} [n] dst1 src1 mem)
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ && zero.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Zero [n] p3 mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Zero [n] p4 mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Zero [n] p5 mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Move from memory if the values are likely to already be
+// in registers.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Same thing but with VarDef in the middle.
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Prefer to Zero and Store rather than to Move.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _)))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+
+// TODO this does not fire before call expansion; is that acceptable?
+(StaticCall {sym} x) && needRaceCleanup(sym, v) => x
+
+// Collapse moving A -> B -> C into just A -> C.
+// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
+// This happens most commonly when B is an autotmp inserted earlier
+// during compilation to ensure correctness.
+// Take care that overlapping moves are preserved.
+// Restrict this optimization to the stack, to avoid duplicating loads from the heap;
+// see CL 145208 for discussion.
+(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Same, but for large types that require VarDefs.
+(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Don't zero the same bits twice.
+(Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero
+(Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef
+
+// Elide self-moves. This only happens rarely (e.g. test/fixedbugs/bug277.go).
+// However, this rule is needed to prevent the previous rule from looping forever in such cases.
+(Move dst src mem) && isSamePtr(dst, src) => mem
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
new file mode 100644
index 0000000..0a7d5dd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -0,0 +1,620 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// Generic opcodes typically specify a width. The inputs and outputs
+// of that op are the given number of bits wide. There is no notion of
+// "sign", so Add32 can be used both for signed and unsigned 32-bit
+// addition.
+
+// Signed/unsigned is explicit with the extension ops
+// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
+// (e.g. the second argument to shifts is unsigned). If not mentioned,
+// all args take signed inputs, or don't care whether their inputs
+// are signed or unsigned.
+
+var genericOps = []opData{
+ // 2-input arithmetic
+ // Types must be consistent with Go typing. Add, for example, must take two values
+ // of the same type and produce that same type.
+ {name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
+ {name: "Add16", argLength: 2, commutative: true},
+ {name: "Add32", argLength: 2, commutative: true},
+ {name: "Add64", argLength: 2, commutative: true},
+ {name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
+ {name: "Add32F", argLength: 2, commutative: true},
+ {name: "Add64F", argLength: 2, commutative: true},
+
+ {name: "Sub8", argLength: 2}, // arg0 - arg1
+ {name: "Sub16", argLength: 2},
+ {name: "Sub32", argLength: 2},
+ {name: "Sub64", argLength: 2},
+ {name: "SubPtr", argLength: 2},
+ {name: "Sub32F", argLength: 2},
+ {name: "Sub64F", argLength: 2},
+
+ {name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1
+ {name: "Mul16", argLength: 2, commutative: true},
+ {name: "Mul32", argLength: 2, commutative: true},
+ {name: "Mul64", argLength: 2, commutative: true},
+ {name: "Mul32F", argLength: 2, commutative: true},
+ {name: "Mul64F", argLength: 2, commutative: true},
+
+ {name: "Div32F", argLength: 2}, // arg0 / arg1
+ {name: "Div64F", argLength: 2},
+
+ {name: "Hmul32", argLength: 2, commutative: true},
+ {name: "Hmul32u", argLength: 2, commutative: true},
+ {name: "Hmul64", argLength: 2, commutative: true},
+ {name: "Hmul64u", argLength: 2, commutative: true},
+
+ {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+
+ {name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32-> 64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
+ {name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))
+
+ // Weird special instructions for use in the strength reduction of divides.
+ // These ops compute unsigned (arg0 + arg1) / 2, correct to all
+ // 32/64 bits, even when the intermediate result of the add has 33/65 bits.
+ // These ops can assume arg0 >= arg1.
+ // Note: these ops aren't commutative!
+ {name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
+ {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
+
+ // For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
+ // or that the dividend is not the most negative value.
+ {name: "Div8", argLength: 2}, // arg0 / arg1, signed
+ {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
+ {name: "Div16", argLength: 2, aux: "Bool"},
+ {name: "Div16u", argLength: 2},
+ {name: "Div32", argLength: 2, aux: "Bool"},
+ {name: "Div32u", argLength: 2},
+ {name: "Div64", argLength: 2, aux: "Bool"},
+ {name: "Div64u", argLength: 2},
+ {name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ // For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "Mod8", argLength: 2}, // arg0 % arg1, signed
+ {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
+ {name: "Mod16", argLength: 2, aux: "Bool"},
+ {name: "Mod16u", argLength: 2},
+ {name: "Mod32", argLength: 2, aux: "Bool"},
+ {name: "Mod32u", argLength: 2},
+ {name: "Mod64", argLength: 2, aux: "Bool"},
+ {name: "Mod64u", argLength: 2},
+
+ {name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
+ {name: "And16", argLength: 2, commutative: true},
+ {name: "And32", argLength: 2, commutative: true},
+ {name: "And64", argLength: 2, commutative: true},
+
+ {name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1
+ {name: "Or16", argLength: 2, commutative: true},
+ {name: "Or32", argLength: 2, commutative: true},
+ {name: "Or64", argLength: 2, commutative: true},
+
+ {name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1
+ {name: "Xor16", argLength: 2, commutative: true},
+ {name: "Xor32", argLength: 2, commutative: true},
+ {name: "Xor64", argLength: 2, commutative: true},
+
+ // For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
+ // Shift amounts are considered unsigned.
+ // If arg1 is known to be nonnegative and less than the number of bits in arg0,
+ // then auxInt may be set to 1.
+ // This enables better code generation on some platforms.
+ {name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
+ {name: "Lsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed
+ {name: "Rsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned
+ {name: "Rsh8Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux64", argLength: 2, aux: "Bool"},
+
+ // 2-input comparisons
+ {name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
+ {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
+ {name: "Less16", argLength: 2, typ: "Bool"},
+ {name: "Less16U", argLength: 2, typ: "Bool"},
+ {name: "Less32", argLength: 2, typ: "Bool"},
+ {name: "Less32U", argLength: 2, typ: "Bool"},
+ {name: "Less64", argLength: 2, typ: "Bool"},
+ {name: "Less64U", argLength: 2, typ: "Bool"},
+ {name: "Less32F", argLength: 2, typ: "Bool"},
+ {name: "Less64F", argLength: 2, typ: "Bool"},
+
+ {name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
+ {name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
+ {name: "Leq16", argLength: 2, typ: "Bool"},
+ {name: "Leq16U", argLength: 2, typ: "Bool"},
+ {name: "Leq32", argLength: 2, typ: "Bool"},
+ {name: "Leq32U", argLength: 2, typ: "Bool"},
+ {name: "Leq64", argLength: 2, typ: "Bool"},
+ {name: "Leq64U", argLength: 2, typ: "Bool"},
+ {name: "Leq32F", argLength: 2, typ: "Bool"},
+ {name: "Leq64F", argLength: 2, typ: "Bool"},
+
+ // the type of a CondSelect is the same as the type of its first
+ // two arguments, which should be register-width scalars; the third
+ // argument should be a boolean
+ {name: "CondSelect", argLength: 3}, // arg2 ? arg0 : arg1
+
+ // boolean ops
+ {name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
+ {name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
+ {name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean
+
+ // 1-input ops
+ {name: "Neg8", argLength: 1}, // -arg0
+ {name: "Neg16", argLength: 1},
+ {name: "Neg32", argLength: 1},
+ {name: "Neg64", argLength: 1},
+ {name: "Neg32F", argLength: 1},
+ {name: "Neg64F", argLength: 1},
+
+ {name: "Com8", argLength: 1}, // ^arg0
+ {name: "Com16", argLength: 1},
+ {name: "Com32", argLength: 1},
+ {name: "Com64", argLength: 1},
+
+ {name: "Ctz8", argLength: 1}, // Count trailing (low order) zeroes (returns 0-8)
+ {name: "Ctz16", argLength: 1}, // Count trailing (low order) zeroes (returns 0-16)
+ {name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32)
+ {name: "Ctz64", argLength: 1}, // Count trailing (low order) zeroes (returns 0-64)
+ {name: "Ctz8NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-7
+ {name: "Ctz16NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-15
+ {name: "Ctz32NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-31
+ {name: "Ctz64NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-63
+ {name: "BitLen8", argLength: 1}, // Number of bits in arg[0] (returns 0-8)
+ {name: "BitLen16", argLength: 1}, // Number of bits in arg[0] (returns 0-16)
+ {name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32)
+ {name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64)
+
+ {name: "Bswap32", argLength: 1}, // Swap bytes
+ {name: "Bswap64", argLength: 1}, // Swap bytes
+
+ {name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]
+
+ {name: "PopCount8", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount16", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount32", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount64", argLength: 1}, // Count bits in arg[0]
+ {name: "RotateLeft8", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft16", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+
+ // Square root, float64 only.
+ // Special cases:
+ // +∞ → +∞
+ // ±0 → ±0 (sign preserved)
+ // x<0 → NaN
+ // NaN → NaN
+ {name: "Sqrt", argLength: 1}, // √arg0
+
+ // Round to integer, float64 only.
+ // Special cases:
+ // ±∞ → ±∞ (sign preserved)
+ // ±0 → ±0 (sign preserved)
+ // NaN → NaN
+ {name: "Floor", argLength: 1}, // round arg0 toward -∞
+ {name: "Ceil", argLength: 1}, // round arg0 toward +∞
+ {name: "Trunc", argLength: 1}, // round arg0 toward 0
+ {name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0
+ {name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even
+
+ // Modify the sign bit
+ {name: "Abs", argLength: 1}, // absolute value arg0
+ {name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
+
+ // 3-input opcode.
+ // Fused-multiply-add, float64 only.
+ // When a*b+c is exactly zero (before rounding), then the result is +0 or -0.
+ // The 0's sign is determined according to the standard rules for the
+ // addition (-0 if both a*b and c are -0, +0 otherwise).
+ //
+ // Otherwise, when a*b+c rounds to zero, then the resulting 0's sign is
+ // determined by the sign of the exact result a*b+c.
+ // See section 6.3 in ieee754.
+ //
+ // When the multiply is an infinity times a zero, the result is NaN.
+ // See section 7.2 in ieee754.
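+ // (As an illustration: on architectures that provide a fused multiply-add
+ // instruction, calls such as math.FMA are typically intrinsified to this op
+ // rather than compiled as a separate multiply and add.)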
+ {name: "FMA", argLength: 3}, // compute (a*b)+c without intermediate rounding
+
+ // Data movement. Max argument length for Phi is indefinite.
+ {name: "Phi", argLength: -1, zeroWidth: true}, // select an argument based on which predecessor block we came from
+ {name: "Copy", argLength: 1}, // output = arg0
+ // Convert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // It gets compiled to nothing, so its result must be in the same
+ // register as its argument. regalloc knows it can use any
+ // allocatable integer register for OpConvert.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "Convert", argLength: 2, zeroWidth: true, resultInArg0: true},
+
+ // constants. Constant values are stored in the aux or
+ // auxint fields.
+ {name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
+ {name: "ConstString", aux: "String"}, // value is aux.(string)
+ {name: "ConstNil", typ: "BytePtr"}, // nil pointer
+ {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
+ {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
+ {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
+ // Note: ConstX are sign-extended even when the type of the value is unsigned.
+ // For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
+ {name: "Const64", aux: "Int64"}, // value is auxint
+ // Note: for both Const32F and Const64F, we disallow encoding NaNs.
+ // Signaling NaNs are tricky because if you do anything with them, they become quiet.
+ // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+ // See issue 36399 and 36400.
+ // Encodings of +inf, -inf, and -0 are fine.
+ {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
+ {name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
+ {name: "ConstInterface"}, // nil interface
+ {name: "ConstSlice"}, // nil slice
+
+ // Constant-like things
+ {name: "InitMem", zeroWidth: true}, // memory input to the function.
+ {name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg.
+
+ // The address of a variable. arg0 is the base pointer.
+ // If the variable is a global, the base pointer will be SB and
+ // the Aux field will be a *obj.LSym.
+ // If the variable is a local, the base pointer will be SP and
+ // the Aux field will be a *gc.Node.
+ {name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SB. Aux identifies the variable.
+ {name: "LocalAddr", argLength: 2, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP. Arg1=mem. Aux identifies the variable.
+
+ {name: "SP", zeroWidth: true}, // stack pointer
+ {name: "SB", typ: "Uintptr", zeroWidth: true}, // static base pointer (a.k.a. globals pointer)
+ {name: "Invalid"}, // unused value
+
+ // Memory operations
+ {name: "Load", argLength: 2}, // Load from arg0. arg1=memory
+ {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value".
+ {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ // The source and destination of Move may overlap in some cases. See e.g.
+ // memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
+ // returns true, we must do all loads before all stores, when lowering Move.
+ // The type of Move is used for the write barrier pass to insert write barriers
+ // and for alignment on some architectures.
+ // For pointerless types, it is possible for the type to be inaccurate.
+ // For type alignment and pointer information, use the type in Aux;
+ // for type size, use the size in AuxInt.
+ // The "inline runtime.memmove" rewrite rule generates Moves with inaccurate types,
+ // such as type byte instead of the more accurate type [8]byte.
+ {name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+
+ // Memory operations with write barriers.
+ // Expand to runtime calls. The write barrier is removed if the write is to the stack.
+ {name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ {name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+
+ // WB invokes runtime.gcWriteBarrier. This is not a normal
+ // call: it takes arguments in registers, doesn't clobber
+ // general-purpose registers (the exact clobber set is
+ // arch-dependent), and is not a safe-point.
+ {name: "WB", argLength: 3, typ: "Mem", aux: "Sym", symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+
+ {name: "HasCPUFeature", argLength: 0, typ: "bool", aux: "Sym", symEffect: "None"}, // aux=place that this feature flag can be loaded from
+
+ // PanicBounds and PanicExtend generate a runtime panic.
+ // Their arguments provide index values to use in panic messages.
+ // Both PanicBounds and PanicExtend have an AuxInt value from the BoundsKind type (in ../op.go).
+ // PanicBounds' index is int sized.
+ // PanicExtend's index is int64 sized. (PanicExtend is only used on 32-bit archs.)
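+ // For example, an out-of-range index in an expression such as a[i] is
+ // compiled, roughly, into an IsInBounds check whose failing branch exits
+ // through a PanicBounds call.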
+ {name: "PanicBounds", argLength: 3, aux: "Int64", typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory.
+ {name: "PanicExtend", argLength: 4, aux: "Int64", typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory.
+
+ // Function calls. Arguments to the call have already been written to the stack.
+ // Return values appear on the stack. The method receiver, if any, is treated
+ // as a phantom first argument.
+ // TODO(josharian): ClosureCall and InterCall should have Int32 aux
+ // to match StaticCall's 32 bit arg size limit.
+ // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff?
+ {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
+ {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
+ {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
+ {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+
+ // Conversions: signed extensions, zero (unsigned) extensions, truncations
+ {name: "SignExt8to16", argLength: 1, typ: "Int16"},
+ {name: "SignExt8to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt8to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt16to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt16to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt32to64", argLength: 1, typ: "Int64"},
+ {name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
+ {name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
+ {name: "Trunc16to8", argLength: 1},
+ {name: "Trunc32to8", argLength: 1},
+ {name: "Trunc32to16", argLength: 1},
+ {name: "Trunc64to8", argLength: 1},
+ {name: "Trunc64to16", argLength: 1},
+ {name: "Trunc64to32", argLength: 1},
+
+ {name: "Cvt32to32F", argLength: 1},
+ {name: "Cvt32to64F", argLength: 1},
+ {name: "Cvt64to32F", argLength: 1},
+ {name: "Cvt64to64F", argLength: 1},
+ {name: "Cvt32Fto32", argLength: 1},
+ {name: "Cvt32Fto64", argLength: 1},
+ {name: "Cvt64Fto32", argLength: 1},
+ {name: "Cvt64Fto64", argLength: 1},
+ {name: "Cvt32Fto64F", argLength: 1},
+ {name: "Cvt64Fto32F", argLength: 1},
+ {name: "CvtBoolToUint8", argLength: 1},
+
+ // Force rounding to precision of type.
+ {name: "Round32F", argLength: 1},
+ {name: "Round64F", argLength: 1},
+
+ // Automatically inserted safety checks
+ {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
+ {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
+ {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
+ {name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
+
+ // Pseudo-ops
+ {name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
+ {name: "GetClosurePtr"}, // get closure pointer from dedicated register
+ {name: "GetCallerPC"}, // for getcallerpc intrinsic
+ {name: "GetCallerSP"}, // for getcallersp intrinsic
+
+ // Indexing operations
+ {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
+ {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
+
+ // Slices
+ {name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
+ {name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "SliceLen", argLength: 1}, // len(arg0)
+ {name: "SliceCap", argLength: 1}, // cap(arg0)
+
+ // Complex (part/whole)
+ {name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag
+ {name: "ComplexReal", argLength: 1}, // real(arg0)
+ {name: "ComplexImag", argLength: 1}, // imag(arg0)
+
+ // Strings
+ {name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len
+ {name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0)
+
+ // Interfaces
+ {name: "IMake", argLength: 2}, // arg0=itab, arg1=data
+ {name: "ITab", argLength: 1, typ: "Uintptr"}, // arg0=interface, returns itable field
+ {name: "IData", argLength: 1}, // arg0=interface, returns data field
+
+ // Structs
+ {name: "StructMake0"}, // Returns struct with 0 fields.
+ {name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct.
+ {name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct.
+ {name: "StructMake3", argLength: 3}, // arg0..2=field0..2. Returns struct.
+ {name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
+ {name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
+
+ // Arrays
+ {name: "ArrayMake0"}, // Returns array with 0 elements
+ {name: "ArrayMake1", argLength: 1}, // Returns array with 1 element
+ {name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i].
+
+ // Spill&restore ops for the register allocator. These are
+ // semantically identical to OpCopy; they do not take/return
+ // stores like regular memory ops do. We can get away without memory
+ // args because we know there is no aliasing of spill slots on the stack.
+ {name: "StoreReg", argLength: 1},
+ {name: "LoadReg", argLength: 1},
+
+ // Used during ssa construction. Like Copy, but the arg has not been specified yet.
+ {name: "FwdRef", aux: "Sym", symEffect: "None"},
+
+ // Unknown value. Used for Values whose values don't matter because they are dead code.
+ {name: "Unknown"},
+
+ {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
+ {name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
+ // TODO: what's the difference between VarLive and KeepAlive?
+ {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
+ {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
+
+ // InlMark marks the start of an inlined function body. Its AuxInt field
+ // distinguishes which entry in the local inline tree it is marking.
+ {name: "InlMark", argLength: 1, aux: "Int32", typ: "Void"}, // arg[0]=mem, returns void.
+
+ // Ops for breaking 64-bit operations on 32-bit architectures
+ {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
+ {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
+ {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0
+
+ {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry)
+ {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
+
+ {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry)
+ {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)
+
+ {name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64)
+ {name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1)
+
+ {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
+ {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
+ {name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.
+
+ {name: "SpectreIndex", argLength: 2}, // arg0 if 0 <= arg0 < arg1, 0 otherwise. Type is native int size.
+ {name: "SpectreSliceIndex", argLength: 2}, // arg0 if 0 <= arg0 <= arg1, 0 otherwise. Type is native int size.
+
+ {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
+ {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
+ {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that have the instruction
+ {name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that have the instruction
+ {name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that have the instruction
+ {name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that have the instruction
+
+ // pseudo-ops for breaking Tuple
+ {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple
+ {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple
+ {name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the auxint'th member.
+ {name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
+ {name: "MakeResult", argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call).
+
+ // Atomic operations used for semantically inlining sync/atomic and
+ // runtime/internal/atomic. Atomic loads return a new memory so that
+ // the loads are properly ordered with respect to other loads and
+ // stores.
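+ // (For example, calls such as sync/atomic.AddUint32 are typically
+ // intrinsified to AtomicAdd32 on architectures that support it.)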
+ {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
+ {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Atomic operation variants
+ // These variants have the same semantics as the atomic operations above,
+ // but they generate more efficient code on certain modern machines, selected by run-time CPU feature detection.
+ // Currently, they are used on ARM64 only.
+ {name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicExchange32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicCompareAndSwap32Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicAnd8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Clobber experiment op
+ {name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable
+}
+
+//   kind            controls        successors     implicit exit
+//   -------------------------------------------------------------
+//   Exit        [return mem]              []                 yes
+//   Ret         [return mem]              []                 yes
+//   RetJmp      [return mem]              []                 yes
+//   Plain                 []          [next]
+//   If       [boolean Value]    [then, else]
+//   First                 []  [always, never]
+
+var genericBlocks = []blockData{
+ {name: "Plain"}, // a single successor
+ {name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
+ {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+ {name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
+ {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is memory result, jumps to b.Aux.(*gc.Sym)
+ {name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
+
+ // transient block state used for dead code removal
+ {name: "First"}, // 2 successors, always takes the first one (second is dead)
+}
+
+func init() {
+ archs = append(archs, arch{
+ name: "generic",
+ ops: genericOps,
+ blocks: genericBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
new file mode 100644
index 0000000..dfa146a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -0,0 +1,541 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// The gen command generates Go code (in the parent directory) for all
+// the architecture-specific opcodes, blocks, and rewrites.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "runtime/pprof"
+ "runtime/trace"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// TODO: capitalize these types, so that we can more easily tell variable names
+// apart from type names, and avoid awkward func parameters like "arch arch".
+
+type arch struct {
+ name string
+ pkg string // obj package to import for this arch.
+ genfile string // source file containing opcode code generation.
+ ops []opData
+ blocks []blockData
+ regnames []string
+ gpregmask regMask
+ fpregmask regMask
+ fp32regmask regMask
+ fp64regmask regMask
+ specialregmask regMask
+ framepointerreg int8
+ linkreg int8
+ generic bool
+ imports []string
+}
+
+type opData struct {
+ name string
+ reg regInfo
+ asm string
+ typ string // default result type
+ aux string
+ rematerializeable bool
+ argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments
+ commutative bool // this operation is commutative on its first 2 arguments (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ call bool // is a function call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ usesScratch bool // this op requires scratch memory space
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect string // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type blockData struct {
+ name string // the suffix for this block ("EQ", "LT", etc.)
+ controls int // the number of control values this type of block requires
+ aux string // the type of the Aux/AuxInt value, if any
+}
+
+type regInfo struct {
+ // inputs[i] encodes the set of registers allowed for the i'th input.
+ // Inputs that don't use registers (flags, memory, etc.) should be 0.
+ inputs []regMask
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs[i] encodes the set of registers allowed for the i'th output.
+ outputs []regMask
+}
+
+type regMask uint64
+
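+// regMaskComment returns a comment such as " // AX CX" naming the registers
+// present in mask r (indexed into a.regnames), or the empty string if r is
+// empty. It is used to annotate the register masks written to opGen.go.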
+func (a arch) regMaskComment(r regMask) string {
+ var buf bytes.Buffer
+ for i := uint64(0); r != 0; i++ {
+ if r&1 != 0 {
+ if buf.Len() == 0 {
+ buf.WriteString(" //")
+ }
+ buf.WriteString(" ")
+ buf.WriteString(a.regnames[i])
+ }
+ r >>= 1
+ }
+ return buf.String()
+}
+
+var archs []arch
+
+var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
+var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
+var tracefile = flag.String("trace", "", "write trace to `file`")
+
+func main() {
+ flag.Parse()
+ if *cpuprofile != "" {
+ f, err := os.Create(*cpuprofile)
+ if err != nil {
+ log.Fatal("could not create CPU profile: ", err)
+ }
+ defer f.Close()
+ if err := pprof.StartCPUProfile(f); err != nil {
+ log.Fatal("could not start CPU profile: ", err)
+ }
+ defer pprof.StopCPUProfile()
+ }
+ if *tracefile != "" {
+ f, err := os.Create(*tracefile)
+ if err != nil {
+ log.Fatalf("failed to create trace output file: %v", err)
+ }
+ defer func() {
+ if err := f.Close(); err != nil {
+ log.Fatalf("failed to close trace file: %v", err)
+ }
+ }()
+
+ if err := trace.Start(f); err != nil {
+ log.Fatalf("failed to start trace: %v", err)
+ }
+ defer trace.Stop()
+ }
+
+ sort.Sort(ArchsByName(archs))
+
+ // The generate tasks are run concurrently, since they are CPU-intensive
+ // tasks that can easily make use of many cores on a machine.
+ //
+ // Note that there is no limit on the concurrency at the moment. On a
+ // four-core laptop at the time of writing, peak RSS usually reaches
+ // ~200MiB, which seems doable by practically any machine nowadays. If
+ // that stops being the case, we can cap this func to a fixed number of
+ // architectures being generated at once.
+
+ tasks := []func(){
+ genOp,
+ }
+ for _, a := range archs {
+ a := a // the funcs are run concurrently at a later time
+ tasks = append(tasks, func() {
+ genRules(a)
+ genSplitLoadRules(a)
+ })
+ }
+ var wg sync.WaitGroup
+ for _, task := range tasks {
+ task := task
+ wg.Add(1)
+ go func() {
+ task()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ if *memprofile != "" {
+ f, err := os.Create(*memprofile)
+ if err != nil {
+ log.Fatal("could not create memory profile: ", err)
+ }
+ defer f.Close()
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ log.Fatal("could not write memory profile: ", err)
+ }
+ }
+}
+
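+// genOp generates ../opGen.go: the Op and BlockKind enumerations, the
+// opcodeTable describing each op's flags and register constraints, and the
+// per-architecture register tables. It then checks that each architecture's
+// genfile handles every one of that architecture's opcodes.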
+func genOp() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated from gen/*Ops.go; DO NOT EDIT.\n")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "package ssa")
+
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"cmd/internal/obj\"")
+ for _, a := range archs {
+ if a.pkg != "" {
+ fmt.Fprintf(w, "%q\n", a.pkg)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate Block* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "BlockInvalid BlockKind = iota")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, d := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s\n", a.Name(), d.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate block kind string method
+ fmt.Fprintln(w, "var blockString = [...]string{")
+ fmt.Fprintln(w, "BlockInvalid:\"BlockInvalid\",")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, b := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s:\"%s\",\n", a.Name(), b.name, b.name)
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}")
+
+ // generate block kind auxint method
+ fmt.Fprintln(w, "func (k BlockKind) AuxIntType() string {")
+ fmt.Fprintln(w, "switch k {")
+ for _, a := range archs {
+ for _, b := range a.blocks {
+ if b.auxIntType() == "invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "case Block%s%s: return \"%s\"\n", a.Name(), b.name, b.auxIntType())
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "return \"\"")
+ fmt.Fprintln(w, "}")
+
+ // generate Op* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0.
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate OpInfo table
+ fmt.Fprintln(w, "var opcodeTable = [...]opInfo{")
+ fmt.Fprintln(w, " { name: \"OpInvalid\" },")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+
+ pkg := path.Base(a.pkg)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintln(w, "{")
+ fmt.Fprintf(w, "name:\"%s\",\n", v.name)
+
+ // flags
+ if v.aux != "" {
+ fmt.Fprintf(w, "auxType: aux%s,\n", v.aux)
+ }
+ fmt.Fprintf(w, "argLen: %d,\n", v.argLength)
+
+ if v.rematerializeable {
+ if v.reg.clobbers != 0 {
+ log.Fatalf("%s is rematerializeable and clobbers registers", v.name)
+ }
+ if v.clobberFlags {
+ log.Fatalf("%s is rematerializeable and clobbers flags", v.name)
+ }
+ fmt.Fprintln(w, "rematerializeable: true,")
+ }
+ if v.commutative {
+ fmt.Fprintln(w, "commutative: true,")
+ }
+ if v.resultInArg0 {
+ fmt.Fprintln(w, "resultInArg0: true,")
+ // OpConvert's register mask is selected dynamically,
+ // so don't try to check it in the static table.
+ if v.name != "Convert" && v.reg.inputs[0] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[0] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ if v.name != "Convert" && v.commutative && v.reg.inputs[1] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[1] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ }
+ if v.resultNotInArgs {
+ fmt.Fprintln(w, "resultNotInArgs: true,")
+ }
+ if v.clobberFlags {
+ fmt.Fprintln(w, "clobberFlags: true,")
+ }
+ if v.call {
+ fmt.Fprintln(w, "call: true,")
+ }
+ if v.nilCheck {
+ fmt.Fprintln(w, "nilCheck: true,")
+ }
+ if v.faultOnNilArg0 {
+ fmt.Fprintln(w, "faultOnNilArg0: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.faultOnNilArg1 {
+ fmt.Fprintln(w, "faultOnNilArg1: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.usesScratch {
+ fmt.Fprintln(w, "usesScratch: true,")
+ }
+ if v.hasSideEffects {
+ fmt.Fprintln(w, "hasSideEffects: true,")
+ }
+ if v.zeroWidth {
+ fmt.Fprintln(w, "zeroWidth: true,")
+ }
+ if v.unsafePoint {
+ fmt.Fprintln(w, "unsafePoint: true,")
+ }
+ needEffect := strings.HasPrefix(v.aux, "Sym")
+ if v.symEffect != "" {
+ if !needEffect {
+ log.Fatalf("symEffect with aux %s not allowed", v.aux)
+ }
+ fmt.Fprintf(w, "symEffect: Sym%s,\n", strings.Replace(v.symEffect, ",", "|Sym", -1))
+ } else if needEffect {
+ log.Fatalf("symEffect needed for aux %s", v.aux)
+ }
+ if a.name == "generic" {
+ fmt.Fprintln(w, "generic:true,")
+ fmt.Fprintln(w, "},") // close op
+ // generic ops have no reg info or asm
+ continue
+ }
+ if v.asm != "" {
+ fmt.Fprintf(w, "asm: %s.A%s,\n", pkg, v.asm)
+ }
+ if v.scale != 0 {
+ fmt.Fprintf(w, "scale: %d,\n", v.scale)
+ }
+ fmt.Fprintln(w, "reg:regInfo{")
+
+ // Compute input allocation order. We allocate from the
+ // most to the least constrained input. This order guarantees
+ // that we will always be able to find a register.
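+ // For example, an input that must live in one specific register (say, a
+ // fixed dividend register that some architectures require for division)
+ // sorts ahead of inputs that may use any general-purpose register.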
+ var s []intPair
+ for i, r := range v.reg.inputs {
+ if r != 0 {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "inputs: []inputInfo{")
+ for _, p := range s {
+ r := v.reg.inputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+
+ if v.reg.clobbers > 0 {
+ fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
+ }
+
+ // reg outputs
+ s = s[:0]
+ for i, r := range v.reg.outputs {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "outputs: []outputInfo{")
+ for _, p := range s {
+ r := v.reg.outputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+ fmt.Fprintln(w, "},") // close reg info
+ fmt.Fprintln(w, "},") // close op
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
+ fmt.Fprintln(w, "func (o Op) Scale() int16 {return int16(opcodeTable[o].scale)}")
+
+ // generate op string method
+ fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
+
+ fmt.Fprintln(w, "func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch }")
+
+ fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }")
+ fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }")
+ fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }")
+ fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }")
+
+ // generate registers
+ for _, a := range archs {
+ if a.generic {
+ continue
+ }
+ fmt.Fprintf(w, "var registers%s = [...]Register {\n", a.name)
+ var gcRegN int
+ for i, r := range a.regnames {
+ pkg := a.pkg[len("cmd/internal/obj/"):]
+ var objname string // name in cmd/internal/obj/$ARCH
+ switch r {
+ case "SB":
+ // SB isn't a real register. cmd/internal/obj expects 0 in this case.
+ objname = "0"
+ case "SP":
+ objname = pkg + ".REGSP"
+ case "g":
+ objname = pkg + ".REGG"
+ default:
+ objname = pkg + ".REG_" + r
+ }
+ // Assign a GC register map index to registers
+ // that may contain pointers.
+ gcRegIdx := -1
+ if a.gpregmask&(1<<uint(i)) != 0 {
+ gcRegIdx = gcRegN
+ gcRegN++
+ }
+ fmt.Fprintf(w, " {%d, %s, %d, \"%s\"},\n", i, objname, gcRegIdx, r)
+ }
+ if gcRegN > 32 {
+ // Won't fit in a uint32 mask.
+ log.Fatalf("too many GC registers (%d > 32) on %s", gcRegN, a.name)
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask)
+ fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask)
+ if a.fp32regmask != 0 {
+ fmt.Fprintf(w, "var fp32RegMask%s = regMask(%d)\n", a.name, a.fp32regmask)
+ }
+ if a.fp64regmask != 0 {
+ fmt.Fprintf(w, "var fp64RegMask%s = regMask(%d)\n", a.name, a.fp64regmask)
+ }
+ fmt.Fprintf(w, "var specialRegMask%s = regMask(%d)\n", a.name, a.specialregmask)
+ fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg)
+ fmt.Fprintf(w, "var linkReg%s = int8(%d)\n", a.name, a.linkreg)
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ var err error
+ b, err = format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", w.Bytes())
+ panic(err)
+ }
+
+ if err := ioutil.WriteFile("../opGen.go", b, 0666); err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+
+ // Check that the arch genfile handles all the arch-specific opcodes.
+ // This is very much a hack, but it is better than nothing.
+ //
+ // Do a single regexp pass to record all ops being handled in a map, and
+ // then compare that with the ops list. This is much faster than one
+ // regexp pass per opcode.
+ for _, a := range archs {
+ if a.genfile == "" {
+ continue
+ }
+
+ pattern := fmt.Sprintf(`\Wssa\.Op%s([a-zA-Z0-9_]+)\W`, a.name)
+ rxOp, err := regexp.Compile(pattern)
+ if err != nil {
+ log.Fatalf("bad opcode regexp %s: %v", pattern, err)
+ }
+
+ src, err := ioutil.ReadFile(a.genfile)
+ if err != nil {
+ log.Fatalf("can't read %s: %v", a.genfile, err)
+ }
+ seen := make(map[string]bool, len(a.ops))
+ for _, m := range rxOp.FindAllSubmatch(src, -1) {
+ seen[string(m[1])] = true
+ }
+ for _, op := range a.ops {
+ if !seen[op.name] {
+ log.Fatalf("Op%s%s has no code generation in %s", a.name, op.name, a.genfile)
+ }
+ }
+ }
+}
+
+// Name returns the name of the architecture for use in Op* and Block* enumerations.
+func (a arch) Name() string {
+ s := a.name
+ if s == "generic" {
+ s = ""
+ }
+ return s
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ n := 0
+ for r != 0 {
+ n += int(r & 1)
+ r >>= 1
+ }
+ return n
+}
+
+// for sorting a pair of integers by key
+type intPair struct {
+ key, val int
+}
+type byKey []intPair
+
+func (a byKey) Len() int { return len(a) }
+func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+type ArchsByName []arch
+
+func (x ArchsByName) Len() int { return len(x) }
+func (x ArchsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ArchsByName) Less(i, j int) bool { return x[i].name < x[j].name }
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
new file mode 100644
index 0000000..aaf9101
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -0,0 +1,1856 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gen
+
+// This program generates Go code that applies rewrite rules to a Value.
+// The generated code implements a function of type func (v *Value) bool
+// which reports whether it did something.
+// Ideas stolen from Swift: http://www.hpl.hp.com/techreports/Compaq-DEC/WRL-2000-2.html
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// rule syntax:
+// sexpr [&& extra conditions] => [@block] sexpr
+//
+// sexpr are s-expressions (lisp-like parenthesized groupings)
+// sexpr ::= [variable:](opcode sexpr*)
+// | variable
+// | <type>
+// | [auxint]
+// | {aux}
+//
+// aux ::= variable | {code}
+// type ::= variable | {code}
+// variable ::= some token
+// opcode ::= one of the opcodes from the *Ops.go files
+
+// special rules: trailing ellipsis "..." (in the outermost sexpr?) must match on both sides of a rule.
+// a trailing triple underscore "___" in the outermost match sexpr indicates the presence of
+// extra ignored args that need not appear in the replacement
+
+// The extra conditions are just a chunk of Go that evaluates to a boolean. They may use
+// variables declared in the matching sexpr. The variable "v" is predefined to be
+// the value matched by the entire rule.
+
+// If multiple rules match, the first one in file order is selected.
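+//
+// For example, a constant-folding rule has the form
+//   (Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
+// where Add64 and Const64 are opcodes, c and d are variables bound by the
+// match, and [c+d] is Go code computing the result value's auxint.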
+
+var (
+ genLog = flag.Bool("log", false, "generate code that logs; for debugging only")
+ addLine = flag.Bool("line", false, "add line number comment to generated rules; for debugging only")
+)
+
+type Rule struct {
+ Rule string
+ Loc string // file name & line number
+}
+
+func (r Rule) String() string {
+ return fmt.Sprintf("rule %q at %s", r.Rule, r.Loc)
+}
+
+func normalizeSpaces(s string) string {
+ return strings.Join(strings.Fields(strings.TrimSpace(s)), " ")
+}
+
+// parse returns the matching part of the rule, additional conditions, and the result.
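+// For example, a rule such as
+//   (Lsh64x64 x (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
+// parses into match "(Lsh64x64 x (Const64 [c]))", cond "uint64(c) >= 64",
+// and result "(Const64 [0])".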
+func (r Rule) parse() (match, cond, result string) {
+ s := strings.Split(r.Rule, "=>")
+ match = normalizeSpaces(s[0])
+ result = normalizeSpaces(s[1])
+ cond = ""
+ if i := strings.Index(match, "&&"); i >= 0 {
+ cond = normalizeSpaces(match[i+2:])
+ match = normalizeSpaces(match[:i])
+ }
+ return match, cond, result
+}
+
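+// genRules generates the value and block rewrite functions for arch from its
+// <arch>.rules file, writing ../rewrite<arch>.go; genSplitLoadRules does the
+// same for the optional <arch>splitload.rules file.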
+func genRules(arch arch) { genRulesSuffix(arch, "") }
+func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") }
+
+func genRulesSuffix(arch arch, suff string) {
+ // Open input file.
+ text, err := os.Open(arch.name + suff + ".rules")
+ if err != nil {
+ if suff == "" {
+ // All architectures must have a plain rules file.
+ log.Fatalf("can't read rule file: %v", err)
+ }
+ // Some architectures have bonus rules files that others don't share. That's fine.
+ return
+ }
+
+ // oprules contains a list of rules for each block and opcode
+ blockrules := map[string][]Rule{}
+ oprules := map[string][]Rule{}
+
+ // read rule file
+ scanner := bufio.NewScanner(text)
+ rule := ""
+ var lineno int
+ var ruleLineno int // line number of "=>"
+ for scanner.Scan() {
+ lineno++
+ line := scanner.Text()
+ if i := strings.Index(line, "//"); i >= 0 {
+ // Remove comments. Note that this isn't string safe, so
+ // it will truncate lines with // inside strings. Oh well.
+ line = line[:i]
+ }
+ rule += " " + line
+ rule = strings.TrimSpace(rule)
+ if rule == "" {
+ continue
+ }
+ if !strings.Contains(rule, "=>") {
+ continue
+ }
+ if ruleLineno == 0 {
+ ruleLineno = lineno
+ }
+ if strings.HasSuffix(rule, "=>") {
+ continue // continue on the next line
+ }
+ if n := balance(rule); n > 0 {
+ continue // open parentheses remain, continue on the next line
+ } else if n < 0 {
+ break // continuing the line can't help, and it will only make errors worse
+ }
+
+ loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno)
+ for _, rule2 := range expandOr(rule) {
+ r := Rule{Rule: rule2, Loc: loc}
+ if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
+ continue
+ }
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
+ }
+ rule = ""
+ ruleLineno = 0
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner failed: %v\n", err)
+ }
+ if balance(rule) != 0 {
+ log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule)
+ }
+
+ // Order all the ops.
+ var ops []string
+ for op := range oprules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+
+ genFile := &File{Arch: arch, Suffix: suff}
+ // Main rewrite routine is a switch on v.Op.
+ fn := &Func{Kind: "Value", ArgLen: -1}
+
+ sw := &Switch{Expr: exprf("v.Op")}
+ for _, op := range ops {
+ eop, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ if strings.Contains(oprules[op][0].Rule, "=>") && opByName(arch, op).aux != opByName(arch, eop).aux {
+ panic(fmt.Sprintf("can't use ... for ops that have different aux types: %s and %s", op, eop))
+ }
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("v.Op = %s", eop))
+ swc.add(stmtf("return true"))
+ sw.add(swc)
+ continue
+ }
+
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("return rewriteValue%s%s_%s(v)", arch.name, suff, op))
+ sw.add(swc)
+ }
+ fn.add(sw)
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Generate a routine per op. Note that we don't make one giant routine
+ // because it is too big for some compilers.
+ for _, op := range ops {
+ rules := oprules[op]
+ _, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ continue
+ }
+
+ // rr is kept between iterations, so that each rule can check
+ // that the previous rule wasn't unconditional.
+ var rr *RuleRewrite
+ fn := &Func{
+ Kind: "Value",
+ Suffix: fmt.Sprintf("_%s", op),
+ ArgLen: opByName(arch, op).argLength,
+ }
+ fn.add(declf("b", "v.Block"))
+ fn.add(declf("config", "b.Func.Config"))
+ fn.add(declf("fe", "b.Func.fe"))
+ fn.add(declf("typ", "&b.Func.Config.Types"))
+ for _, rule := range rules {
+ if rr != nil && !rr.CanFail {
+ log.Fatalf("unconditional rule %s is followed by other rules", rr.Match)
+ }
+ rr = &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0)
+ if pos == "" {
+ pos = "v.Pos"
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+ genResult(rr, arch, rr.Result, pos)
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ fn.add(rr)
+ }
+ if rr.CanFail {
+ fn.add(stmtf("return false"))
+ }
+ genFile.add(fn)
+ }
+
+ // Generate block rewrite function. There are only a few block types
+ // so we can make this one function with a switch.
+ fn = &Func{Kind: "Block"}
+ fn.add(declf("config", "b.Func.Config"))
+ fn.add(declf("typ", "&b.Func.Config.Types"))
+
+ sw = &Switch{Expr: exprf("b.Kind")}
+ ops = ops[:0]
+ for op := range blockrules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+ for _, op := range ops {
+ name, data := getBlockInfo(op, arch)
+ swc := &Case{Expr: exprf("%s", name)}
+ for _, rule := range blockrules[op] {
+ swc.add(genBlockRewrite(rule, arch, data))
+ }
+ sw.add(swc)
+ }
+ fn.add(sw)
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Remove unused imports and variables.
+ buf := new(bytes.Buffer)
+ fprint(buf, genFile)
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", buf, parser.ParseComments)
+ if err != nil {
+ filename := fmt.Sprintf("%s_broken.go", arch.name)
+ if err := ioutil.WriteFile(filename, buf.Bytes(), 0644); err != nil {
+ log.Printf("failed to dump broken code to %s: %v", filename, err)
+ } else {
+ log.Printf("dumped broken code to %s", filename)
+ }
+ log.Fatalf("failed to parse generated code for arch %s: %v", arch.name, err)
+ }
+ tfile := fset.File(file.Pos())
+
+ // First, use unusedInspector to find the unused declarations by their
+ // start position.
+ u := unusedInspector{unused: make(map[token.Pos]bool)}
+ u.node(file)
+
+ // Then, delete said nodes via astutil.Apply.
+ pre := func(c *astutil.Cursor) bool {
+ node := c.Node()
+ if node == nil {
+ return true
+ }
+ if u.unused[node.Pos()] {
+ c.Delete()
+ // Unused imports and declarations use exactly
+ // one line. Prevent leaving an empty line.
+ tfile.MergeLine(tfile.Position(node.Pos()).Line)
+ return false
+ }
+ return true
+ }
+ post := func(c *astutil.Cursor) bool {
+ switch node := c.Node().(type) {
+ case *ast.GenDecl:
+ if len(node.Specs) == 0 {
+ // Don't leave a broken or empty GenDecl behind,
+ // such as "import ()".
+ c.Delete()
+ }
+ }
+ return true
+ }
+ file = astutil.Apply(file, pre, post).(*ast.File)
+
+ // Write the well-formatted source to file
+ f, err := os.Create("../rewrite" + arch.name + suff + ".go")
+ if err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ defer f.Close()
+ // gofmt result; use a buffered writer, as otherwise go/format spends
+ // far too much time in syscalls.
+ bw := bufio.NewWriter(f)
+ if err := format.Node(bw, fset, file); err != nil {
+ log.Fatalf("can't format output: %v", err)
+ }
+ if err := bw.Flush(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+}
+
+// unusedInspector can be used to detect unused variables and imports in an
+// ast.Node via its node method. The result is available in the "unused" map.
+//
+// note that unusedInspector is lazy and best-effort; it only supports the node
+// types and patterns used by the rulegen program.
+type unusedInspector struct {
+ // scope is the current scope, which can never be nil when a declaration
+ // is encountered. That is, the unusedInspector.node entrypoint should
+ // generally be an entire file or block.
+ scope *scope
+
+ // unused is the resulting set of unused declared names, indexed by the
+ // starting position of the node that declared the name.
+ unused map[token.Pos]bool
+
+ // defining is the object currently being defined; this is useful so
+ // that if "foo := bar" is unused and removed, we can then detect if
+ // "bar" becomes unused as well.
+ defining *object
+}
+
+// scoped opens a new scope when called, and returns a function which closes
+// that same scope. When a scope is closed, unused variables are recorded.
+func (u *unusedInspector) scoped() func() {
+ outer := u.scope
+ u.scope = &scope{outer: outer, objects: map[string]*object{}}
+ return func() {
+ for anyUnused := true; anyUnused; {
+ anyUnused = false
+ for _, obj := range u.scope.objects {
+ if obj.numUses > 0 {
+ continue
+ }
+ u.unused[obj.pos] = true
+ for _, used := range obj.used {
+ if used.numUses--; used.numUses == 0 {
+ anyUnused = true
+ }
+ }
+ // We've decremented numUses for each of the
+ // objects in used. Zero this slice too, to keep
+ // everything consistent.
+ obj.used = nil
+ }
+ }
+ u.scope = outer
+ }
+}
+
+func (u *unusedInspector) exprs(list []ast.Expr) {
+ for _, x := range list {
+ u.node(x)
+ }
+}
+
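+// node records, in the current scope, the names declared by node and the uses
+// it makes of names already in scope; it panics on AST node types that
+// rulegen's generated code never produces.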
+func (u *unusedInspector) node(node ast.Node) {
+ switch node := node.(type) {
+ case *ast.File:
+ defer u.scoped()()
+ for _, decl := range node.Decls {
+ u.node(decl)
+ }
+ case *ast.GenDecl:
+ for _, spec := range node.Specs {
+ u.node(spec)
+ }
+ case *ast.ImportSpec:
+ impPath, _ := strconv.Unquote(node.Path.Value)
+ name := path.Base(impPath)
+ u.scope.objects[name] = &object{
+ name: name,
+ pos: node.Pos(),
+ }
+ case *ast.FuncDecl:
+ u.node(node.Type)
+ if node.Body != nil {
+ u.node(node.Body)
+ }
+ case *ast.FuncType:
+ if node.Params != nil {
+ u.node(node.Params)
+ }
+ if node.Results != nil {
+ u.node(node.Results)
+ }
+ case *ast.FieldList:
+ for _, field := range node.List {
+ u.node(field)
+ }
+ case *ast.Field:
+ u.node(node.Type)
+
+ // statements
+
+ case *ast.BlockStmt:
+ defer u.scoped()()
+ for _, stmt := range node.List {
+ u.node(stmt)
+ }
+ case *ast.DeclStmt:
+ u.node(node.Decl)
+ case *ast.IfStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ u.node(node.Cond)
+ u.node(node.Body)
+ if node.Else != nil {
+ u.node(node.Else)
+ }
+ case *ast.ForStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Cond != nil {
+ u.node(node.Cond)
+ }
+ if node.Post != nil {
+ u.node(node.Post)
+ }
+ u.node(node.Body)
+ case *ast.SwitchStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Tag != nil {
+ u.node(node.Tag)
+ }
+ u.node(node.Body)
+ case *ast.CaseClause:
+ u.exprs(node.List)
+ defer u.scoped()()
+ for _, stmt := range node.Body {
+ u.node(stmt)
+ }
+ case *ast.BranchStmt:
+ case *ast.ExprStmt:
+ u.node(node.X)
+ case *ast.AssignStmt:
+ if node.Tok != token.DEFINE {
+ u.exprs(node.Rhs)
+ u.exprs(node.Lhs)
+ break
+ }
+ lhs := node.Lhs
+ if len(lhs) == 2 && lhs[1].(*ast.Ident).Name == "_" {
+ lhs = lhs[:1]
+ }
+ if len(lhs) != 1 {
+ panic("no support for := with multiple names")
+ }
+
+ name := lhs[0].(*ast.Ident)
+ obj := &object{
+ name: name.Name,
+ pos: name.NamePos,
+ }
+
+ old := u.defining
+ u.defining = obj
+ u.exprs(node.Rhs)
+ u.defining = old
+
+ u.scope.objects[name.Name] = obj
+ case *ast.ReturnStmt:
+ u.exprs(node.Results)
+ case *ast.IncDecStmt:
+ u.node(node.X)
+
+ // expressions
+
+ case *ast.CallExpr:
+ u.node(node.Fun)
+ u.exprs(node.Args)
+ case *ast.SelectorExpr:
+ u.node(node.X)
+ case *ast.UnaryExpr:
+ u.node(node.X)
+ case *ast.BinaryExpr:
+ u.node(node.X)
+ u.node(node.Y)
+ case *ast.StarExpr:
+ u.node(node.X)
+ case *ast.ParenExpr:
+ u.node(node.X)
+ case *ast.IndexExpr:
+ u.node(node.X)
+ u.node(node.Index)
+ case *ast.TypeAssertExpr:
+ u.node(node.X)
+ u.node(node.Type)
+ case *ast.Ident:
+ if obj := u.scope.Lookup(node.Name); obj != nil {
+ obj.numUses++
+ if u.defining != nil {
+ u.defining.used = append(u.defining.used, obj)
+ }
+ }
+ case *ast.BasicLit:
+ case *ast.ValueSpec:
+ u.exprs(node.Values)
+ default:
+ panic(fmt.Sprintf("unhandled node: %T", node))
+ }
+}
+
+// scope keeps track of a certain scope and its declared names, as well as the
+// outer (parent) scope.
+type scope struct {
+ outer *scope // can be nil, if this is the top-level scope
+ objects map[string]*object // indexed by each declared name
+}
+
+func (s *scope) Lookup(name string) *object {
+ if obj := s.objects[name]; obj != nil {
+ return obj
+ }
+ if s.outer == nil {
+ return nil
+ }
+ return s.outer.Lookup(name)
+}
+
+// object keeps track of a declared name, such as a variable or import.
+type object struct {
+ name string
+ pos token.Pos // start position of the node declaring the object
+
+ numUses int // number of times this object is used
+ used []*object // objects that its declaration makes use of
+}
+
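+// fprint writes the Go source for node n to w. It handles this package's
+// pseudo-node types (File, Func, Switch, RuleRewrite, ...) itself and defers
+// to go/printer for plain ast.Nodes.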
+func fprint(w io.Writer, n Node) {
+ switch n := n.(type) {
+ case *File:
+ file := n
+ seenRewrite := make(map[[3]string]string)
+ fmt.Fprintf(w, "// Code generated from gen/%s%s.rules; DO NOT EDIT.\n", n.Arch.name, n.Suffix)
+ fmt.Fprintf(w, "// generated with: cd gen; go run *.go\n")
+ fmt.Fprintf(w, "\npackage ssa\n")
+ for _, path := range append([]string{
+ "fmt",
+ "math",
+ "cmd/internal/obj",
+ "cmd/internal/objabi",
+ "cmd/compile/internal/types",
+ }, n.Arch.imports...) {
+ fmt.Fprintf(w, "import %q\n", path)
+ }
+ for _, f := range n.List {
+ f := f.(*Func)
+ fmt.Fprintf(w, "func rewrite%s%s%s%s(", f.Kind, n.Arch.name, n.Suffix, f.Suffix)
+ fmt.Fprintf(w, "%c *%s) bool {\n", strings.ToLower(f.Kind)[0], f.Kind)
+ if f.Kind == "Value" && f.ArgLen > 0 {
+ for i := f.ArgLen - 1; i >= 0; i-- {
+ fmt.Fprintf(w, "v_%d := v.Args[%d]\n", i, i)
+ }
+ }
+ for _, n := range f.List {
+ fprint(w, n)
+
+ if rr, ok := n.(*RuleRewrite); ok {
+ k := [3]string{
+ normalizeMatch(rr.Match, file.Arch),
+ normalizeWhitespace(rr.Cond),
+ normalizeWhitespace(rr.Result),
+ }
+ if prev, ok := seenRewrite[k]; ok {
+ log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.Loc, prev)
+ }
+ seenRewrite[k] = rr.Loc
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ case *Switch:
+ fmt.Fprintf(w, "switch ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, " {\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Case:
+ fmt.Fprintf(w, "case ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, ":\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ case *RuleRewrite:
+ if *addLine {
+ fmt.Fprintf(w, "// %s\n", n.Loc)
+ }
+ fmt.Fprintf(w, "// match: %s\n", n.Match)
+ if n.Cond != "" {
+ fmt.Fprintf(w, "// cond: %s\n", n.Cond)
+ }
+ fmt.Fprintf(w, "// result: %s\n", n.Result)
+ fmt.Fprintf(w, "for %s {\n", n.Check)
+ nCommutative := 0
+ for _, n := range n.List {
+ if b, ok := n.(*CondBreak); ok {
+ b.InsideCommuteLoop = nCommutative > 0
+ }
+ fprint(w, n)
+ if loop, ok := n.(StartCommuteLoop); ok {
+ if nCommutative != loop.Depth {
+ panic("mismatch commute loop depth")
+ }
+ nCommutative++
+ }
+ }
+ fmt.Fprintf(w, "return true\n")
+ for i := 0; i < nCommutative; i++ {
+ fmt.Fprintln(w, "}")
+ }
+ if n.CommuteDepth > 0 && n.CanFail {
+ fmt.Fprint(w, "break\n")
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Declare:
+ fmt.Fprintf(w, "%s := ", n.Name)
+ fprint(w, n.Value)
+ fmt.Fprintln(w)
+ case *CondBreak:
+ fmt.Fprintf(w, "if ")
+ fprint(w, n.Cond)
+ fmt.Fprintf(w, " {\n")
+ if n.InsideCommuteLoop {
+ fmt.Fprintf(w, "continue")
+ } else {
+ fmt.Fprintf(w, "break")
+ }
+ fmt.Fprintf(w, "\n}\n")
+ case ast.Node:
+ printConfig.Fprint(w, emptyFset, n)
+ if _, ok := n.(ast.Stmt); ok {
+ fmt.Fprintln(w)
+ }
+ case StartCommuteLoop:
+ fmt.Fprintf(w, "for _i%[1]d := 0; _i%[1]d <= 1; _i%[1]d, %[2]s_0, %[2]s_1 = _i%[1]d + 1, %[2]s_1, %[2]s_0 {\n", n.Depth, n.V)
+ default:
+ log.Fatalf("cannot print %T", n)
+ }
+}
+
+var printConfig = printer.Config{
+ Mode: printer.RawFormat, // we use go/format later, so skip work here
+}
+
+var emptyFset = token.NewFileSet()
+
+// Node can be a Statement or an ast.Expr.
+type Node interface{}
+
+// Statement can be one of our high-level statement struct types, or an
+// ast.Stmt under some limited circumstances.
+type Statement interface{}
+
+// BodyBase is shared by all of our statement pseudo-node types which can
+// contain other statements.
+type BodyBase struct {
+ List []Statement
+ CanFail bool
+}
+
+func (w *BodyBase) add(node Statement) {
+ var last Statement
+ if len(w.List) > 0 {
+ last = w.List[len(w.List)-1]
+ }
+ if node, ok := node.(*CondBreak); ok {
+ w.CanFail = true
+ if last, ok := last.(*CondBreak); ok {
+ // Merge into the previous "if <cond> { break }" via a
+ // logical OR, which keeps the generated code less verbose.
+ last.Cond = &ast.BinaryExpr{
+ Op: token.LOR,
+ X: last.Cond,
+ Y: node.Cond,
+ }
+ return
+ }
+ }
+
+ w.List = append(w.List, node)
+}
+
+// predeclared contains globally known tokens that should not be redefined.
+var predeclared = map[string]bool{
+ "nil": true,
+ "false": true,
+ "true": true,
+}
+
+// declared reports whether the body contains a Declare with the given name.
+func (w *BodyBase) declared(name string) bool {
+ if predeclared[name] {
+ // Treat predeclared names as having already been declared.
+ // This lets us use nil to match an aux field or
+ // true and false to match an auxint field.
+ return true
+ }
+ for _, s := range w.List {
+ if decl, ok := s.(*Declare); ok && decl.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// These types define some high-level statement struct types, which can be used
+// as a Statement. This allows us to keep some node structs simpler, and have
+// higher-level nodes such as an entire rule rewrite.
+//
+// Note that ast.Expr is always used as-is; we don't declare our own expression
+// nodes.
+type (
+ File struct {
+ BodyBase // []*Func
+ Arch arch
+ Suffix string
+ }
+ Func struct {
+ BodyBase
+ Kind string // "Value" or "Block"
+ Suffix string
+ ArgLen int32 // if kind == "Value", number of args for this op
+ }
+ Switch struct {
+ BodyBase // []*Case
+ Expr ast.Expr
+ }
+ Case struct {
+ BodyBase
+ Expr ast.Expr
+ }
+ RuleRewrite struct {
+ BodyBase
+ Match, Cond, Result string // top comments
+ Check string // top-level boolean expression
+
+ Alloc int // for unique var names
+ Loc string // file name & line number of the original rule
+ CommuteDepth int // used to track depth of commute loops
+ }
+ Declare struct {
+ Name string
+ Value ast.Expr
+ }
+ CondBreak struct {
+ Cond ast.Expr
+ InsideCommuteLoop bool
+ }
+ StartCommuteLoop struct {
+ Depth int
+ V string
+ }
+)
+
+// exprf parses a Go expression generated from fmt.Sprintf, exiting with a
+// fatal error if parsing fails.
+func exprf(format string, a ...interface{}) ast.Expr {
+ src := fmt.Sprintf(format, a...)
+ expr, err := parser.ParseExpr(src)
+ if err != nil {
+ log.Fatalf("expr parse error on %q: %v", src, err)
+ }
+ return expr
+}
+
+// stmtf parses a Go statement generated from fmt.Sprintf. This function is only
+// meant for simple statements that don't have a custom Statement node declared
+// in this package, such as ast.ReturnStmt or ast.ExprStmt.
+func stmtf(format string, a ...interface{}) Statement {
+ src := fmt.Sprintf(format, a...)
+ fsrc := "package p\nfunc _() {\n" + src + "\n}\n"
+ file, err := parser.ParseFile(token.NewFileSet(), "", fsrc, 0)
+ if err != nil {
+ log.Fatalf("stmt parse error on %q: %v", src, err)
+ }
+ return file.Decls[0].(*ast.FuncDecl).Body.List[0]
+}
+
+// declf constructs a simple "name := value" declaration, using exprf for its
+// value.
+func declf(name, format string, a ...interface{}) *Declare {
+ return &Declare{name, exprf(format, a...)}
+}
+
+// breakf constructs a simple "if cond { break }" statement, using exprf for its
+// condition.
+func breakf(format string, a ...interface{}) *CondBreak {
+ return &CondBreak{Cond: exprf(format, a...)}
+}
+
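+// genBlockRewrite generates the statements for a single block-rewrite rule:
+// it matches the block's control values, auxint and aux fields, checks the
+// rule's condition, and then emits the statements that reset the block to the
+// result form (including swapped successors when needed).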
+func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
+ rr := &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ _, _, auxint, aux, s := extract(rr.Match) // remove parens, then split
+
+ // check match of control values
+ if len(s) < data.controls {
+ log.Fatalf("incorrect number of arguments in %s, got %v wanted at least %v", rule, len(s), data.controls)
+ }
+ controls := s[:data.controls]
+ pos := make([]string, data.controls)
+ for i, arg := range controls {
+ cname := fmt.Sprintf("b.Controls[%v]", i)
+ if strings.Contains(arg, "(") {
+ vname, expr := splitNameExpr(arg)
+ if vname == "" {
+ vname = fmt.Sprintf("v_%v", i)
+ }
+ rr.add(declf(vname, cname))
+ p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt?
+ if op != "" {
+ check := fmt.Sprintf("%s.Op == %s", cname, op)
+ if rr.Check == "" {
+ rr.Check = check
+ } else {
+ rr.Check += " && " + check
+ }
+ }
+ if p == "" {
+ p = vname + ".Pos"
+ }
+ pos[i] = p
+ } else {
+ rr.add(declf(arg, cname))
+ pos[i] = arg + ".Pos"
+ }
+ }
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {auxint, "AuxInt", data.auxIntType()},
+ {aux, "Aux", data.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", data.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ rr.add(breakf("%sTo%s(b.%s) != %s", unTitle(e.field), title(e.dclType), e.field, e.name))
+ } else {
+ rr.add(declf(e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field))
+ }
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+
+ // Rule matches. Generate result.
+ outop, _, auxint, aux, t := extract(rr.Result) // remove parens, then split
+ blockName, outdata := getBlockInfo(outop, arch)
+ if len(t) < outdata.controls {
+ log.Fatalf("incorrect number of output arguments in %s, got %v wanted at least %v", rule, len(s), outdata.controls)
+ }
+
+ // Check if newsuccs is the same set as succs.
+ succs := s[data.controls:]
+ newsuccs := t[outdata.controls:]
+ m := map[string]bool{}
+ for _, succ := range succs {
+ if m[succ] {
+ log.Fatalf("can't have a repeat successor name %s in %s", succ, rule)
+ }
+ m[succ] = true
+ }
+ for _, succ := range newsuccs {
+ if !m[succ] {
+ log.Fatalf("unknown successor %s in %s", succ, rule)
+ }
+ delete(m, succ)
+ }
+ if len(m) != 0 {
+ log.Fatalf("unmatched successors %v in %s", m, rule)
+ }
+
+ var genControls [2]string
+ for i, control := range t[:outdata.controls] {
+ // Select a source position for any new control values.
+ // TODO: does it always make sense to use the source position
+ // of the original control values or should we be using the
+ // block's source position in some cases?
+ newpos := "b.Pos" // default to block's source position
+ if i < len(pos) && pos[i] != "" {
+ // Use the previous control value's source position.
+ newpos = pos[i]
+ }
+
+ // Generate a new control value (or copy an existing value).
+ genControls[i] = genResult0(rr, arch, control, false, false, newpos, nil)
+ }
+ switch outdata.controls {
+ case 0:
+ rr.add(stmtf("b.Reset(%s)", blockName))
+ case 1:
+ rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
+ case 2:
+ rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
+ default:
+ log.Fatalf("too many controls: %d", outdata.controls)
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
+ }
+
+ succChanged := false
+ for i := 0; i < len(succs); i++ {
+ if succs[i] != newsuccs[i] {
+ succChanged = true
+ }
+ }
+ if succChanged {
+ if len(succs) != 2 {
+ log.Fatalf("changed successors, len!=2 in %s", rule)
+ }
+ if succs[0] != newsuccs[1] || succs[1] != newsuccs[0] {
+ log.Fatalf("can only handle swapped successors in %s", rule)
+ }
+ rr.add(stmtf("b.swapSuccessors()"))
+ }
+
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ return rr
+}
+
+// genMatch returns the variable whose source position should be used for the
+// result (or "" if no opinion), and the opcode check string ("Op<arch><name>")
+// that the top-level match requires.
+func genMatch(rr *RuleRewrite, arch arch, match string, pregenTop bool) (pos, checkOp string) {
+ cnt := varCount(rr)
+ return genMatch0(rr, arch, match, "v", cnt, pregenTop)
+}
+
+func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pregenTop bool) (pos, checkOp string) {
+ if match[0] != '(' || match[len(match)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in genMatch0: %q", rr.Loc, match)
+ }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, rr.Loc)
+
+ checkOp = fmt.Sprintf("Op%s%s", oparch, op.name)
+
+ if op.faultOnNilArg0 || op.faultOnNilArg1 {
+ // Prefer the position of an instruction which could fault.
+ pos = v + ".Pos"
+ }
+
+ // If the last argument is ___, it means "don't care about trailing arguments".
+ // The likely/intended use is for rewrites that are too tricky to express in the existing pattern language.
+ // Do the length check early, because a long pattern fed a short (ultimately non-matching) input
+ // would otherwise cause an indexing error during pattern matching.
+ if op.argLength == -1 {
+ l := len(args)
+ if l == 0 || args[l-1] != "___" {
+ rr.add(breakf("len(%s.Args) != %d", v, l))
+ } else if l > 1 && args[l-1] == "___" {
+ rr.add(breakf("len(%s.Args) < %d", v, l-1))
+ }
+ }
+
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {typ, "Type", "*types.Type"},
+ {auxint, "AuxInt", op.auxIntType()},
+ {aux, "Aux", op.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", op.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ switch e.field {
+ case "Aux":
+ rr.add(breakf("auxTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "AuxInt":
+ rr.add(breakf("auxIntTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "Type":
+ rr.add(breakf("%s.%s != %s", v, e.field, e.name))
+ }
+ } else {
+ switch e.field {
+ case "Aux":
+ rr.add(declf(e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "AuxInt":
+ rr.add(declf(e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "Type":
+ rr.add(declf(e.name, "%s.%s", v, e.field))
+ }
+ }
+ }
+
+ commutative := op.commutative
+ if commutative {
+ if args[0] == args[1] {
+ // When we have (Add x x), for any x,
+ // even if there are other uses of x besides these two,
+ // and even if x is not a variable,
+ // we can skip the commutative match.
+ commutative = false
+ }
+ if cnt[args[0]] == 1 && cnt[args[1]] == 1 {
+ // When we have (Add x y) with no other uses
+ // of x and y in the matching rule and condition,
+ // then we can skip the commutative match (Add y x).
+ commutative = false
+ }
+ }
+
+ if !pregenTop {
+ // Access last argument first to minimize bounds checks.
+ for n := len(args) - 1; n > 0; n-- {
+ a := args[n]
+ if a == "_" {
+ continue
+ }
+ if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
+ rr.add(declf(a, "%s.Args[%d]", v, n))
+ // delete the last argument so it is not reprocessed
+ args = args[:n]
+ } else {
+ rr.add(stmtf("_ = %s.Args[%d]", v, n))
+ }
+ break
+ }
+ }
+ if commutative && !pregenTop {
+ for i := 0; i <= 1; i++ {
+ vname := fmt.Sprintf("%s_%d", v, i)
+ rr.add(declf(vname, "%s.Args[%d]", v, i))
+ }
+ }
+ if commutative {
+ rr.add(StartCommuteLoop{rr.CommuteDepth, v})
+ rr.CommuteDepth++
+ }
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ var rhs string
+ if (commutative && i < 2) || pregenTop {
+ rhs = fmt.Sprintf("%s_%d", v, i)
+ } else {
+ rhs = fmt.Sprintf("%s.Args[%d]", v, i)
+ }
+ if !strings.Contains(arg, "(") {
+ // leaf variable
+ if rr.declared(arg) {
+ // variable already has a definition. Check whether
+ // the old definition and the new definition match.
+ // For example, (add x x). Equality is just pointer equality
+ // on Values (so cse is important to do before lowering).
+ rr.add(breakf("%s != %s", arg, rhs))
+ } else {
+ if arg != rhs {
+ rr.add(declf(arg, "%s", rhs))
+ }
+ }
+ continue
+ }
+ // compound sexpr
+ argname, expr := splitNameExpr(arg)
+ if argname == "" {
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ if argname == "b" {
+ log.Fatalf("don't name args 'b', it is ambiguous with blocks")
+ }
+
+ if argname != rhs {
+ rr.add(declf(argname, "%s", rhs))
+ }
+ bexpr := exprf("%s.Op != addLater", argname)
+ rr.add(&CondBreak{Cond: bexpr})
+ argPos, argCheckOp := genMatch0(rr, arch, expr, argname, cnt, false)
+ bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp
+
+ if argPos != "" {
+ // Keep the argument in preference to the parent, as the
+ // argument is normally earlier in program flow.
+ // Keep the argument in preference to an earlier argument,
+ // as that prefers the memory argument which is also earlier
+ // in the program flow.
+ pos = argPos
+ }
+ }
+
+ return pos, checkOp
+}
+
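+// genResult generates the code for the result side of a rule, handling the
+// "@block" directive which moves the result to a different block.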
+func genResult(rr *RuleRewrite, arch arch, result, pos string) {
+ move := result[0] == '@'
+ if move {
+ // parse @block directive
+ s := strings.SplitN(result[1:], " ", 2)
+ rr.add(stmtf("b = %s", s[0]))
+ result = s[1]
+ }
+ cse := make(map[string]string)
+ genResult0(rr, arch, result, true, move, pos, cse)
+}
+
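+// genResult0 recursively generates the value for result and returns the name
+// of the variable holding it. top reports whether this is the rule's top-level
+// result; move reports whether the result is being moved to another block.
+// cse, if non-nil, is used to reuse identical sub-expressions.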
+func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos string, cse map[string]string) string {
+ resname, expr := splitNameExpr(result)
+ result = expr
+ // TODO: when generating a constant result, use f.constVal to avoid
+ // introducing copies just to clean them up again.
+ if result[0] != '(' {
+ // variable
+ if top {
+ // It is not safe in general to move a variable between blocks
+ // (and particularly not a phi node).
+ // Introduce a copy.
+ rr.add(stmtf("v.copyOf(%s)", result))
+ }
+ return result
+ }
+
+ w := normalizeWhitespace(result)
+ if prev := cse[w]; prev != "" {
+ return prev
+ }
+
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, rr.Loc)
+
+ // Find the type of the variable.
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
+ }
+
+ v := "v"
+ if top && !move {
+ rr.add(stmtf("v.reset(Op%s%s)", oparch, op.name))
+ if typeOverride {
+ rr.add(stmtf("v.Type = %s", typ))
+ }
+ } else {
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) at %s must have a type", result, oparch, op.name, rr.Loc)
+ }
+ if resname == "" {
+ v = fmt.Sprintf("v%d", rr.Alloc)
+ } else {
+ v = resname
+ }
+ rr.Alloc++
+ rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
+ if move && top {
+ // Rewrite original into a copy
+ rr.add(stmtf("v.copyOf(%s)", v))
+ }
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
+ }
+ all := new(strings.Builder)
+ for i, arg := range args {
+ x := genResult0(rr, arch, arg, false, move, pos, cse)
+ if i > 0 {
+ all.WriteString(", ")
+ }
+ all.WriteString(x)
+ }
+ switch len(args) {
+ case 0:
+ case 1:
+ rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
+ default:
+ rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
+ }
+
+ if cse != nil {
+ cse[w] = v
+ }
+ return v
+}
+
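+// split breaks s into its space-separated components, keeping anything
+// enclosed in (), {}, [], or <> together. For example,
+// split("Add <t> (Const [1]) x") yields ["Add", "<t>", "(Const [1])", "x"].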
+func split(s string) []string {
+ var r []string
+
+outer:
+ for s != "" {
+ d := 0 // depth of ({[<
+ var open, close byte // opening and closing markers ({[< or )}]>
+ nonsp := false // found a non-space char so far
+ for i := 0; i < len(s); i++ {
+ switch {
+ case d == 0 && s[i] == '(':
+ open, close = '(', ')'
+ d++
+ case d == 0 && s[i] == '<':
+ open, close = '<', '>'
+ d++
+ case d == 0 && s[i] == '[':
+ open, close = '[', ']'
+ d++
+ case d == 0 && s[i] == '{':
+ open, close = '{', '}'
+ d++
+ case d == 0 && (s[i] == ' ' || s[i] == '\t'):
+ if nonsp {
+ r = append(r, strings.TrimSpace(s[:i]))
+ s = s[i:]
+ continue outer
+ }
+ case d > 0 && s[i] == open:
+ d++
+ case d > 0 && s[i] == close:
+ d--
+ default:
+ nonsp = true
+ }
+ }
+ if d != 0 {
+ log.Fatalf("imbalanced expression: %q", s)
+ }
+ if nonsp {
+ r = append(r, strings.TrimSpace(s))
+ }
+ break
+ }
+ return r
+}
+
+// isBlock reports whether this op is a block opcode.
+func isBlock(name string, arch arch) bool {
+ for _, b := range genericBlocks {
+ if b.name == name {
+ return true
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == name {
+ return true
+ }
+ }
+ return false
+}
+
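+// extract splits a parenthesized value such as "(OpFoo <t> [auxint] {aux} arg0 arg1)"
+// into its op name, type, auxint, aux, and argument strings.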
+func extract(val string) (op, typ, auxint, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ op = s[0]
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
+ }
+ }
+ return
+}
+
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxint, aux string, args []string) {
+ // Resolve the op.
+ var s string
+ s, typ, auxint, aux, args = extract(val)
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) && (len(args) != 1 || args[0] != "...") {
+ if strict {
+ return false
+ }
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s, archname, x.argLength, len(args))
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
+ }
+ }
+ for _, x := range arch.ops {
+ if arch.name != "generic" && match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
+ }
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" && !opHasAuxInt(op) {
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ if aux != "" && !opHasAux(op) {
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ return
+}
+
+func opHasAuxInt(op opData) bool {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
+ "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
+ return true
+ }
+ return false
+}
+
+func opHasAux(op opData) bool {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize",
+ "S390XCCMask", "S390XRotateParams":
+ return true
+ }
+ return false
+}
+
+// splitNameExpr splits s-expr arg, possibly prefixed by "name:",
+// into name and the unprefixed expression.
+// For example, "x:(Foo)" yields "x", "(Foo)",
+// and "(Foo)" yields "", "(Foo)".
+func splitNameExpr(arg string) (name, expr string) {
+ colon := strings.Index(arg, ":")
+ if colon < 0 {
+ return "", arg
+ }
+ openparen := strings.Index(arg, "(")
+ if openparen < 0 {
+ log.Fatalf("splitNameExpr(%q): colon but no open parens", arg)
+ }
+ if colon > openparen {
+ // colon is inside the parens, such as in "(Foo x:(Bar))".
+ return "", arg
+ }
+ return arg[:colon], arg[colon+1:]
+}
+
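+// getBlockInfo returns the generated "Block..." constant name and the blockData
+// for op, looking first in the generic blocks and then in the architecture's blocks.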
+func getBlockInfo(op string, arch arch) (name string, data blockData) {
+ for _, b := range genericBlocks {
+ if b.name == op {
+ return "Block" + op, b
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == op {
+ return "Block" + arch.name + op, b
+ }
+ }
+ log.Fatalf("could not find block data for %s", op)
+ panic("unreachable")
+}
+
+// typeName returns the string to use to generate a type.
+func typeName(typ string) string {
+ if typ[0] == '(' {
+ ts := strings.Split(typ[1:len(typ)-1], ",")
+ if len(ts) != 2 {
+ log.Fatalf("Tuple expect 2 arguments")
+ }
+ return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
+ }
+ switch typ {
+ case "Flags", "Mem", "Void", "Int128":
+ return "types.Type" + typ
+ default:
+ return "typ." + typ
+ }
+}
+
+// balance returns the number of unclosed '(' characters in s.
+// If a ')' appears without a corresponding '(', balance returns -1.
+func balance(s string) int {
+ balance := 0
+ for _, c := range s {
+ switch c {
+ case '(':
+ balance++
+ case ')':
+ balance--
+ if balance < 0 {
+ // don't allow ")(" to return 0
+ return -1
+ }
+ }
+ }
+ return balance
+}
+
+// findAllOpcode finds the |-separated alternative groups, such as "(B|W|L|Q)",
+// in the opcode portion of s-expressions.
+var findAllOpcode = regexp.MustCompile(`[(](\w+[|])+\w+[)]`).FindAllStringIndex
+
+// excludeFromExpansion reports whether the substring s[idx[0]:idx[1]] in a rule
+// should be disregarded as a candidate for | expansion.
+// It uses simple syntactic checks to see whether the substring
+// is inside an AuxInt expression or inside the && conditions.
+func excludeFromExpansion(s string, idx []int) bool {
+ left := s[:idx[0]]
+ if strings.LastIndexByte(left, '[') > strings.LastIndexByte(left, ']') {
+ // Inside an AuxInt expression.
+ return true
+ }
+ right := s[idx[1]:]
+ if strings.Contains(left, "&&") && strings.Contains(right, "=>") {
+ // Inside && conditions.
+ return true
+ }
+ return false
+}
+
+// expandOr converts a rule into multiple rules by expanding | ops.
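+// For example, a rule written with "MOV(B|W)load" expands into one rule using
+// MOVBload and another using MOVWload.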
+func expandOr(r string) []string {
+ // Find every occurrence of |-separated things.
+ // They look like MOV(B|W|L|Q|SS|SD)load or MOV(Q|L)loadidx(1|8).
+ // Generate rules selecting one case from each |-form.
+
+ // Count width of |-forms. They must match.
+ n := 1
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ s := r[idx[0]:idx[1]]
+ c := strings.Count(s, "|") + 1
+ if c == 1 {
+ continue
+ }
+ if n > 1 && n != c {
+ log.Fatalf("'|' count doesn't match in %s: both %d and %d\n", r, n, c)
+ }
+ n = c
+ }
+ if n == 1 {
+ // No |-form in this rule.
+ return []string{r}
+ }
+ // Build each new rule.
+ res := make([]string, n)
+ for i := 0; i < n; i++ {
+ buf := new(strings.Builder)
+ x := 0
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ buf.WriteString(r[x:idx[0]]) // write bytes we've skipped over so far
+ s := r[idx[0]+1 : idx[1]-1] // remove leading "(" and trailing ")"
+ buf.WriteString(strings.Split(s, "|")[i]) // write the op component for this rule
+ x = idx[1] // note that we've written more bytes
+ }
+ buf.WriteString(r[x:])
+ res[i] = buf.String()
+ }
+ return res
+}
+
+// varCount returns a map which counts the number of occurrences of
+// Value variables in the s-expression rr.Match and the Go expression rr.Cond.
+func varCount(rr *RuleRewrite) map[string]int {
+ cnt := map[string]int{}
+ varCount1(rr.Loc, rr.Match, cnt)
+ if rr.Cond != "" {
+ expr, err := parser.ParseExpr(rr.Cond)
+ if err != nil {
+ log.Fatalf("%s: failed to parse cond %q: %v", rr.Loc, rr.Cond, err)
+ }
+ ast.Inspect(expr, func(n ast.Node) bool {
+ if id, ok := n.(*ast.Ident); ok {
+ cnt[id.Name]++
+ }
+ return true
+ })
+ }
+ return cnt
+}
+
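+// varCount1 recursively counts variable occurrences in the match expression m,
+// accumulating them into cnt.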
+func varCount1(loc, m string, cnt map[string]int) {
+ if m[0] == '<' || m[0] == '[' || m[0] == '{' {
+ return
+ }
+ if token.IsIdentifier(m) {
+ cnt[m]++
+ return
+ }
+ // Split up input.
+ name, expr := splitNameExpr(m)
+ if name != "" {
+ cnt[name]++
+ }
+ if expr[0] != '(' || expr[len(expr)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in varCount1: %q", loc, expr)
+ }
+ s := split(expr[1 : len(expr)-1])
+ for _, arg := range s[1:] {
+ varCount1(loc, arg, cnt)
+ }
+}
+
+// normalizeWhitespace collapses runs of whitespace into single spaces, removes
+// stray spaces just inside () and [] delimiters, and keeps a space before "=>".
+func normalizeWhitespace(x string) string {
+ x = strings.Join(strings.Fields(x), " ")
+ x = strings.Replace(x, "( ", "(", -1)
+ x = strings.Replace(x, " )", ")", -1)
+ x = strings.Replace(x, "[ ", "[", -1)
+ x = strings.Replace(x, " ]", "]", -1)
+ x = strings.Replace(x, ")=>", ") =>", -1)
+ return x
+}
+
+// opIsCommutative reports whether op is commutative.
+func opIsCommutative(op string, arch arch) bool {
+ for _, x := range genericOps {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ }
+ return false
+}
+
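+// normalizeMatch returns a canonical form of the match pattern m, normalizing
+// whitespace and the argument order of commutative ops so that duplicate rules
+// can be detected.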
+func normalizeMatch(m string, arch arch) string {
+ if token.IsIdentifier(m) {
+ return m
+ }
+ op, typ, auxint, aux, args := extract(m)
+ if opIsCommutative(op, arch) {
+ if args[1] < args[0] {
+ args[0], args[1] = args[1], args[0]
+ }
+ }
+ s := new(strings.Builder)
+ fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux)
+ for _, arg := range args {
+ prefix, expr := splitNameExpr(arg)
+ fmt.Fprint(s, " ", prefix, normalizeMatch(expr, arch))
+ }
+ return s.String()
+}
+
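+// parseEllipsisRules reports whether the group of rules for one op consists of
+// a single ellipsis rule like (OpX ...) => (OpY ...), and if so returns the
+// generated name of the result op.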
+func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) {
+ if len(rules) != 1 {
+ for _, r := range rules {
+ if strings.Contains(r.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in rule, but there are other rules with the same op", r.Loc)
+ }
+ }
+ return "", false
+ }
+ rule := rules[0]
+ match, cond, result := rule.parse()
+ if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) {
+ if strings.Contains(rule.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc)
+ }
+ checkEllipsisRuleCandidate(rule, arch)
+ return "", false
+ }
+ op, oparch, _, _, _, _ := parseValue(result, arch, rule.Loc)
+ return fmt.Sprintf("Op%s%s", oparch, op.name), true
+}
+
+// isEllipsisValue reports whether s is of the form (OpX ...).
+func isEllipsisValue(s string) bool {
+ if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' {
+ return false
+ }
+ c := split(s[1 : len(s)-1])
+ if len(c) != 2 || c[1] != "..." {
+ return false
+ }
+ return true
+}
+
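+// checkEllipsisRuleCandidate prints a diagnostic if rule looks like it could be
+// rewritten as an ellipsis rule but is not written as one.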
+func checkEllipsisRuleCandidate(rule Rule, arch arch) {
+ match, cond, result := rule.parse()
+ if cond != "" {
+ return
+ }
+ op, _, _, auxint, aux, args := parseValue(match, arch, rule.Loc)
+ var auxint2, aux2 string
+ var args2 []string
+ var usingCopy string
+ var eop opData
+ if result[0] != '(' {
+ // Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...).
+ args2 = []string{result}
+ usingCopy = " using Copy"
+ } else {
+ eop, _, _, auxint2, aux2, args2 = parseValue(result, arch, rule.Loc)
+ }
+ // Check that all restrictions in match are reproduced exactly in result.
+ if aux != aux2 || auxint != auxint2 || len(args) != len(args2) {
+ return
+ }
+ if strings.Contains(rule.Rule, "=>") && op.aux != eop.aux {
+ return
+ }
+ for i := range args {
+ if args[i] != args2[i] {
+ return
+ }
+ }
+ switch {
+ case opHasAux(op) && aux == "" && aux2 == "":
+ fmt.Printf("%s: rule silently zeros aux, either copy aux or explicitly zero\n", rule.Loc)
+ case opHasAuxInt(op) && auxint == "" && auxint2 == "":
+ fmt.Printf("%s: rule silently zeros auxint, either copy auxint or explicitly zero\n", rule.Loc)
+ default:
+ fmt.Printf("%s: possible ellipsis rule candidate%s: %q\n", rule.Loc, usingCopy, rule.Rule)
+ }
+}
+
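+// opByName returns the opData for the generated op name (e.g. "OpCopy" or
+// "OpAMD64ADDQ"), looking first in the generic ops and then in the arch's ops.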
+func opByName(arch arch, name string) opData {
+ name = name[2:]
+ for _, x := range genericOps {
+ if name == x.name {
+ return x
+ }
+ }
+ if arch.name != "generic" {
+ name = name[len(arch.name):]
+ for _, x := range arch.ops {
+ if name == x.name {
+ return x
+ }
+ }
+ }
+ log.Fatalf("failed to find op named %s in arch %s", name, arch.name)
+ panic("unreachable")
+}
+
+// auxType returns the Go type that this operation should store in its aux field.
+func (op opData) auxType() string {
+ switch op.aux {
+ case "String":
+ return "string"
+ case "Sym":
+ // Note: a Sym can be an *obj.LSym, a *gc.Node, or nil.
+ return "Sym"
+ case "SymOff":
+ return "Sym"
+ case "Call":
+ return "Call"
+ case "CallOff":
+ return "Call"
+ case "SymValAndOff":
+ return "Sym"
+ case "Typ":
+ return "*types.Type"
+ case "TypSize":
+ return "*types.Type"
+ case "S390XCCMask":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this operation should store in its auxInt field.
+func (op opData) auxIntType() string {
+ switch op.aux {
+ case "Bool":
+ return "bool"
+ case "Int8":
+ return "int8"
+ case "Int16":
+ return "int16"
+ case "Int32":
+ return "int32"
+ case "Int64":
+ return "int64"
+ case "Int128":
+ return "int128"
+ case "UInt8":
+ return "uint8"
+ case "Float32":
+ return "float32"
+ case "Float64":
+ return "float64"
+ case "CallOff":
+ return "int32"
+ case "SymOff":
+ return "int32"
+ case "SymValAndOff":
+ return "ValAndOff"
+ case "TypSize":
+ return "int64"
+ case "CCop":
+ return "Op"
+ case "FlagConstant":
+ return "flagConstant"
+ case "ARM64BitField":
+ return "arm64BitField"
+ default:
+ return "invalid"
+ }
+}
+
+// auxType returns the Go type that this block should store in its aux field.
+func (b blockData) auxType() string {
+ switch b.aux {
+ case "S390XCCMask", "S390XCCMaskInt8", "S390XCCMaskUint8":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this block should store in its auxInt field.
+func (b blockData) auxIntType() string {
+ switch b.aux {
+ case "S390XCCMaskInt8":
+ return "int8"
+ case "S390XCCMaskUint8":
+ return "uint8"
+ case "Int64":
+ return "int64"
+ default:
+ return "invalid"
+ }
+}
+
+func title(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.Title(s)
+}
+
+func unTitle(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.ToLower(s[:1]) + s[1:]
+}
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
new file mode 100644
index 0000000..c06b580
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -0,0 +1,1319 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/src"
+ "fmt"
+ "html"
+ exec "internal/execabs"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+type HTMLWriter struct {
+ w io.WriteCloser
+ Func *Func
+ path string
+ dot *dotWriter
+ prevHash []byte
+ pendingPhases []string
+ pendingTitles []string
+}
+
+func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter {
+ path = strings.Replace(path, "/", string(filepath.Separator), -1)
+ out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath := path
+ if !filepath.IsAbs(reportPath) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath = filepath.Join(pwd, path)
+ }
+ html := HTMLWriter{
+ w: out,
+ Func: f,
+ path: reportPath,
+ dot: newDotWriter(cfgMask),
+ }
+ html.start()
+ return &html
+}
+
+// Fatalf reports an error and exits.
+func (w *HTMLWriter) Fatalf(msg string, args ...interface{}) {
+ fe := w.Func.Frontend()
+ fe.Fatalf(src.NoXPos, msg, args...)
+}
+
+// Logf calls the underlying Func's Logf method, passing along msg and args.
+func (w *HTMLWriter) Logf(msg string, args ...interface{}) {
+ w.Func.Logf(msg, args...)
+}
+
+func (w *HTMLWriter) start() {
+ if w == nil {
+ return
+ }
+ w.WriteString("<html>")
+ w.WriteString(`<head>
+<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
+<style>
+
+body {
+ font-size: 14px;
+ font-family: Arial, sans-serif;
+}
+
+h1 {
+ font-size: 18px;
+ display: inline-block;
+ margin: 0 1em .5em 0;
+}
+
+#helplink {
+ display: inline-block;
+}
+
+#help {
+ display: none;
+}
+
+.stats {
+ font-size: 60%;
+}
+
+table {
+ border: 1px solid black;
+ table-layout: fixed;
+ width: 300px;
+}
+
+th, td {
+ border: 1px solid black;
+ overflow: hidden;
+ width: 400px;
+ vertical-align: top;
+ padding: 5px;
+}
+
+td > h2 {
+ cursor: pointer;
+ font-size: 120%;
+ margin: 5px 0px 5px 0px;
+}
+
+td.collapsed {
+ font-size: 12px;
+ width: 12px;
+ border: 1px solid white;
+ padding: 2px;
+ cursor: pointer;
+ background: #fafafa;
+}
+
+td.collapsed div {
+ text-align: right;
+ transform: rotate(180deg);
+ writing-mode: vertical-lr;
+ white-space: pre;
+}
+
+code, pre, .lines, .ast {
+ font-family: Menlo, monospace;
+ font-size: 12px;
+}
+
+pre {
+ -moz-tab-size: 4;
+ -o-tab-size: 4;
+ tab-size: 4;
+}
+
+.allow-x-scroll {
+ overflow-x: scroll;
+}
+
+.lines {
+ float: left;
+ overflow: hidden;
+ text-align: right;
+ margin-top: 7px;
+}
+
+.lines div {
+ padding-right: 10px;
+ color: gray;
+}
+
+div.line-number {
+ font-size: 12px;
+}
+
+.ast {
+ white-space: nowrap;
+}
+
+td.ssa-prog {
+ width: 600px;
+ word-wrap: break-word;
+}
+
+li {
+ list-style-type: none;
+}
+
+li.ssa-long-value {
+ text-indent: -2em; /* indent wrapped lines */
+}
+
+li.ssa-value-list {
+ display: inline;
+}
+
+li.ssa-start-block {
+ padding: 0;
+ margin: 0;
+}
+
+li.ssa-end-block {
+ padding: 0;
+ margin: 0;
+}
+
+ul.ssa-print-func {
+ padding-left: 0;
+}
+
+li.ssa-start-block button {
+ padding: 0 1em;
+ margin: 0;
+ border: none;
+ display: inline;
+ font-size: 14px;
+ float: right;
+}
+
+button:hover {
+ background-color: #eee;
+ cursor: pointer;
+}
+
+dl.ssa-gen {
+ padding-left: 0;
+}
+
+dt.ssa-prog-src {
+ padding: 0;
+ margin: 0;
+ float: left;
+ width: 4em;
+}
+
+dd.ssa-prog {
+ padding: 0;
+ margin-right: 0;
+ margin-left: 4em;
+}
+
+.dead-value {
+ color: gray;
+}
+
+.dead-block {
+ opacity: 0.5;
+}
+
+.depcycle {
+ font-style: italic;
+}
+
+.line-number {
+ font-size: 11px;
+}
+
+.no-line-number {
+ font-size: 11px;
+ color: gray;
+}
+
+.zoom {
+ position: absolute;
+ float: left;
+ white-space: nowrap;
+ background-color: #eee;
+}
+
+.zoom a:link, .zoom a:visited {
+ text-decoration: none;
+ color: blue;
+ font-size: 16px;
+ padding: 4px 2px;
+}
+
+svg {
+ cursor: default;
+ outline: 1px solid #eee;
+ width: 100%;
+}
+
+body.darkmode {
+ background-color: rgb(21, 21, 21);
+ color: rgb(230, 255, 255);
+ opacity: 100%;
+}
+
+td.darkmode {
+ background-color: rgb(21, 21, 21);
+ border: 1px solid gray;
+}
+
+body.darkmode table, th {
+ border: 1px solid gray;
+}
+
+body.darkmode text {
+ fill: white;
+}
+
+body.darkmode svg polygon:first-child {
+ fill: rgb(21, 21, 21);
+}
+
+.highlight-aquamarine { background-color: aquamarine; color: black; }
+.highlight-coral { background-color: coral; color: black; }
+.highlight-lightpink { background-color: lightpink; color: black; }
+.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
+.highlight-palegreen { background-color: palegreen; color: black; }
+.highlight-skyblue { background-color: skyblue; color: black; }
+.highlight-lightgray { background-color: lightgray; color: black; }
+.highlight-yellow { background-color: yellow; color: black; }
+.highlight-lime { background-color: lime; color: black; }
+.highlight-khaki { background-color: khaki; color: black; }
+.highlight-aqua { background-color: aqua; color: black; }
+.highlight-salmon { background-color: salmon; color: black; }
+
+/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
+.dead-value span.highlight-aquamarine,
+.dead-block.highlight-aquamarine,
+.dead-value span.highlight-coral,
+.dead-block.highlight-coral,
+.dead-value span.highlight-lightpink,
+.dead-block.highlight-lightpink,
+.dead-value span.highlight-lightsteelblue,
+.dead-block.highlight-lightsteelblue,
+.dead-value span.highlight-palegreen,
+.dead-block.highlight-palegreen,
+.dead-value span.highlight-skyblue,
+.dead-block.highlight-skyblue,
+.dead-value span.highlight-lightgray,
+.dead-block.highlight-lightgray,
+.dead-value span.highlight-yellow,
+.dead-block.highlight-yellow,
+.dead-value span.highlight-lime,
+.dead-block.highlight-lime,
+.dead-value span.highlight-khaki,
+.dead-block.highlight-khaki,
+.dead-value span.highlight-aqua,
+.dead-block.highlight-aqua,
+.dead-value span.highlight-salmon,
+.dead-block.highlight-salmon {
+ color: gray;
+}
+
+.outline-blue { outline: #2893ff solid 2px; }
+.outline-red { outline: red solid 2px; }
+.outline-blueviolet { outline: blueviolet solid 2px; }
+.outline-darkolivegreen { outline: darkolivegreen solid 2px; }
+.outline-fuchsia { outline: fuchsia solid 2px; }
+.outline-sienna { outline: sienna solid 2px; }
+.outline-gold { outline: gold solid 2px; }
+.outline-orangered { outline: orangered solid 2px; }
+.outline-teal { outline: teal solid 2px; }
+.outline-maroon { outline: maroon solid 2px; }
+.outline-black { outline: black solid 2px; }
+
+ellipse.outline-blue { stroke-width: 2px; stroke: #2893ff; }
+ellipse.outline-red { stroke-width: 2px; stroke: red; }
+ellipse.outline-blueviolet { stroke-width: 2px; stroke: blueviolet; }
+ellipse.outline-darkolivegreen { stroke-width: 2px; stroke: darkolivegreen; }
+ellipse.outline-fuchsia { stroke-width: 2px; stroke: fuchsia; }
+ellipse.outline-sienna { stroke-width: 2px; stroke: sienna; }
+ellipse.outline-gold { stroke-width: 2px; stroke: gold; }
+ellipse.outline-orangered { stroke-width: 2px; stroke: orangered; }
+ellipse.outline-teal { stroke-width: 2px; stroke: teal; }
+ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; }
+ellipse.outline-black { stroke-width: 2px; stroke: black; }
+
+/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
+body.darkmode .outline-black { outline: gray solid 2px; }
+body.darkmode ellipse.outline-black { outline: gray solid 2px; }
+
+</style>
+
+<script type="text/javascript">
+
+// Contains phase names which are expanded by default. Other columns are collapsed.
+let expandedDefault = [
+ "start",
+ "deadcode",
+ "opt",
+ "lower",
+ "late-deadcode",
+ "regalloc",
+ "genssa",
+];
+if (history.state === null) {
+ history.pushState({expandedDefault}, "", location.href);
+}
+
+// ordered list of all available highlight colors
+var highlights = [
+ "highlight-aquamarine",
+ "highlight-coral",
+ "highlight-lightpink",
+ "highlight-lightsteelblue",
+ "highlight-palegreen",
+ "highlight-skyblue",
+ "highlight-lightgray",
+ "highlight-yellow",
+ "highlight-lime",
+ "highlight-khaki",
+ "highlight-aqua",
+ "highlight-salmon"
+];
+
+// state: which value is highlighted this color?
+var highlighted = {};
+for (var i = 0; i < highlights.length; i++) {
+ highlighted[highlights[i]] = "";
+}
+
+// ordered list of all available outline colors
+var outlines = [
+ "outline-blue",
+ "outline-red",
+ "outline-blueviolet",
+ "outline-darkolivegreen",
+ "outline-fuchsia",
+ "outline-sienna",
+ "outline-gold",
+ "outline-orangered",
+ "outline-teal",
+ "outline-maroon",
+ "outline-black"
+];
+
+// state: which value is outlined this color?
+var outlined = {};
+for (var i = 0; i < outlines.length; i++) {
+ outlined[outlines[i]] = "";
+}
+
+window.onload = function() {
+ if (history.state !== null) {
+ expandedDefault = history.state.expandedDefault;
+ }
+ if (window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)").matches) {
+ toggleDarkMode();
+ document.getElementById("dark-mode-button").checked = true;
+ }
+
+ var ssaElemClicked = function(elem, event, selections, selected) {
+ event.stopPropagation();
+
+ // find all values with the same name
+ var c = elem.classList.item(0);
+ var x = document.getElementsByClassName(c);
+
+ // if selected, remove selections from all of them
+ // otherwise, attempt to add
+
+ var remove = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == c) {
+ remove = color;
+ break;
+ }
+ }
+
+ if (remove != "") {
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.remove(remove);
+ }
+ selected[remove] = "";
+ return;
+ }
+
+ // we're adding a selection
+ // find first available color
+ var avail = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == "") {
+ avail = color;
+ break;
+ }
+ }
+ if (avail == "") {
+ alert("out of selection colors; go add more");
+ return;
+ }
+
+ // set that as the selection
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.add(avail);
+ }
+ selected[avail] = c;
+ };
+
+ var ssaValueClicked = function(event) {
+ ssaElemClicked(this, event, highlights, highlighted);
+ };
+
+ var ssaBlockClicked = function(event) {
+ ssaElemClicked(this, event, outlines, outlined);
+ };
+
+ var ssavalues = document.getElementsByClassName("ssa-value");
+ for (var i = 0; i < ssavalues.length; i++) {
+ ssavalues[i].addEventListener('click', ssaValueClicked);
+ }
+
+ var ssalongvalues = document.getElementsByClassName("ssa-long-value");
+ for (var i = 0; i < ssalongvalues.length; i++) {
+ // don't attach listeners to li nodes, just the spans they contain
+ if (ssalongvalues[i].nodeName == "SPAN") {
+ ssalongvalues[i].addEventListener('click', ssaValueClicked);
+ }
+ }
+
+ var ssablocks = document.getElementsByClassName("ssa-block");
+ for (var i = 0; i < ssablocks.length; i++) {
+ ssablocks[i].addEventListener('click', ssaBlockClicked);
+ }
+
+ var lines = document.getElementsByClassName("line-number");
+ for (var i = 0; i < lines.length; i++) {
+ lines[i].addEventListener('click', ssaValueClicked);
+ }
+
+
+ function toggler(phase) {
+ return function() {
+ toggle_cell(phase+'-col');
+ toggle_cell(phase+'-exp');
+ const i = expandedDefault.indexOf(phase);
+ if (i !== -1) {
+ expandedDefault.splice(i, 1);
+ } else {
+ expandedDefault.push(phase);
+ }
+ history.pushState({expandedDefault}, "", location.href);
+ };
+ }
+
+ function toggle_cell(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'table-cell') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'table-cell';
+ }
+ }
+
+ // Go through all columns and collapse needed phases.
+ const td = document.getElementsByTagName("td");
+ for (let i = 0; i < td.length; i++) {
+ const id = td[i].id;
+ const phase = id.substr(0, id.length-4);
+ let show = expandedDefault.indexOf(phase) !== -1
+
+ // If show == false, check to see if this is a combined column (multiple phases).
+ // If combined, check each of the phases to see if they are in our expandedDefaults.
+ // If any are found, that entire combined column gets shown.
+ if (!show) {
+ const combined = phase.split('--+--');
+ const len = combined.length;
+ if (len > 1) {
+ for (let i = 0; i < len; i++) {
+ const num = expandedDefault.indexOf(combined[i]);
+ if (num !== -1) {
+ expandedDefault.splice(num, 1);
+ if (expandedDefault.indexOf(phase) === -1) {
+ expandedDefault.push(phase);
+ show = true;
+ }
+ }
+ }
+ }
+ }
+ if (id.endsWith("-exp")) {
+ const h2Els = td[i].getElementsByTagName("h2");
+ const len = h2Els.length;
+ if (len > 0) {
+ for (let i = 0; i < len; i++) {
+ h2Els[i].addEventListener('click', toggler(phase));
+ }
+ }
+ } else {
+ td[i].addEventListener('click', toggler(phase));
+ }
+ if (id.endsWith("-col") && show || id.endsWith("-exp") && !show) {
+ td[i].style.display = 'none';
+ continue;
+ }
+ td[i].style.display = 'table-cell';
+ }
+
+ // find all svg block nodes, add their block classes
+ var nodes = document.querySelectorAll('*[id^="graph_node_"]');
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var block = name.substring(name.lastIndexOf("_")+1);
+ node.classList.remove("node");
+ node.classList.add(block);
+ node.addEventListener('click', ssaBlockClicked);
+ var ellipse = node.getElementsByTagName('ellipse')[0];
+ ellipse.classList.add(block);
+ ellipse.addEventListener('click', ssaBlockClicked);
+ }
+
+ // make big graphs smaller
+ var targetScale = 0.5;
+ var nodes = document.querySelectorAll('*[id^="svg_graph_"]');
+ // TODO: Implement smarter auto-zoom using the viewBox attribute
+ // and in case of big graphs set the width and height of the svg graph to
+ // maximum allowed.
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var phase = name.substring(name.lastIndexOf("_")+1);
+ var gNode = document.getElementById("g_graph_"+phase);
+ var scale = gNode.transform.baseVal.getItem(0).matrix.a;
+ if (scale > targetScale) {
+ node.width.baseVal.value *= targetScale / scale;
+ node.height.baseVal.value *= targetScale / scale;
+ }
+ }
+};
+
+function toggle_visibility(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'block') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'block';
+ }
+}
+
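+// hideBlock collapses or expands the list of values in a basic block when the
+// block's +/- toggle is clicked.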
+function hideBlock(el) {
+ var es = el.parentNode.parentNode.getElementsByClassName("ssa-value-list");
+ if (es.length===0)
+ return;
+ var e = es[0];
+ if (e.style.display === 'block' || e.style.display === '') {
+ e.style.display = 'none';
+ el.innerHTML = '+';
+ } else {
+ e.style.display = 'block';
+ el.innerHTML = '-';
+ }
+}
+
+// TODO: scale the graph with the viewBox attribute.
+function graphReduce(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 0.9;
+ node.height.baseVal.value *= 0.9;
+ }
+ return false;
+}
+
+function graphEnlarge(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 1.1;
+ node.height.baseVal.value *= 1.1;
+ }
+ return false;
+}
+
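+// makeDraggable enables panning of an SVG CFG by dragging: it adjusts the
+// viewBox as the pointer (or mouse) moves while pressed.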
+function makeDraggable(event) {
+ var svg = event.target;
+ if (window.PointerEvent) {
+ svg.addEventListener('pointerdown', startDrag);
+ svg.addEventListener('pointermove', drag);
+ svg.addEventListener('pointerup', endDrag);
+ svg.addEventListener('pointerleave', endDrag);
+ } else {
+ svg.addEventListener('mousedown', startDrag);
+ svg.addEventListener('mousemove', drag);
+ svg.addEventListener('mouseup', endDrag);
+ svg.addEventListener('mouseleave', endDrag);
+ }
+
+ var point = svg.createSVGPoint();
+ var isPointerDown = false;
+ var pointerOrigin;
+ var viewBox = svg.viewBox.baseVal;
+
+ function getPointFromEvent (event) {
+ point.x = event.clientX;
+ point.y = event.clientY;
+
+ // We get the current transformation matrix of the SVG and we inverse it
+ var invertedSVGMatrix = svg.getScreenCTM().inverse();
+ return point.matrixTransform(invertedSVGMatrix);
+ }
+
+ function startDrag(event) {
+ isPointerDown = true;
+ pointerOrigin = getPointFromEvent(event);
+ }
+
+ function drag(event) {
+ if (!isPointerDown) {
+ return;
+ }
+ event.preventDefault();
+
+ var pointerPosition = getPointFromEvent(event);
+ viewBox.x -= (pointerPosition.x - pointerOrigin.x);
+ viewBox.y -= (pointerPosition.y - pointerOrigin.y);
+ }
+
+ function endDrag(event) {
+ isPointerDown = false;
+ }
+}
+
+function toggleDarkMode() {
+ document.body.classList.toggle('darkmode');
+
+ // Collect all of the "collapsed" elements and apply dark mode on each collapsed column
+ const collapsedEls = document.getElementsByClassName('collapsed');
+ const len = collapsedEls.length;
+
+ for (let i = 0; i < len; i++) {
+ collapsedEls[i].classList.toggle('darkmode');
+ }
+
+ // Collect and spread the appropriate elements from all of the svgs on the page into one array
+ const svgParts = [
+ ...document.querySelectorAll('path'),
+ ...document.querySelectorAll('ellipse'),
+ ...document.querySelectorAll('polygon'),
+ ];
+
+ // Iterate over the svgParts specifically looking for white and black fill/stroke to be toggled.
+ // The verbose conditional is intentional here so that we do not mutate any svg path, ellipse, or polygon that is of any color other than white or black.
+ svgParts.forEach(el => {
+ if (el.attributes.stroke.value === 'white') {
+ el.attributes.stroke.value = 'black';
+ } else if (el.attributes.stroke.value === 'black') {
+ el.attributes.stroke.value = 'white';
+ }
+ if (el.attributes.fill.value === 'white') {
+ el.attributes.fill.value = 'black';
+ } else if (el.attributes.fill.value === 'black') {
+ el.attributes.fill.value = 'white';
+ }
+ });
+}
+
+</script>
+
+</head>`)
+ w.WriteString("<body>")
+ w.WriteString("<h1>")
+ w.WriteString(html.EscapeString(w.Func.Name))
+ w.WriteString("</h1>")
+ w.WriteString(`
+<a href="#" onclick="toggle_visibility('help');return false;" id="helplink">help</a>
+<div id="help">
+
+<p>
+Click on a value or block to toggle highlighting of that value/block
+and its uses. (Values and blocks are highlighted by ID, and IDs of
+dead items may be reused, so not all highlights necessarily correspond
+to the clicked item.)
+</p>
+
+<p>
+Faded out values and blocks are dead code that has not been eliminated.
+</p>
+
+<p>
+Values printed in italics have a dependency cycle.
+</p>
+
+<p>
+<b>CFG</b>: Dashed edge is for unlikely branches. Blue color is for backward edges.
+Edge with a dot means that this edge follows the order in which blocks were laidout.
+</p>
+
+</div>
+<label for="dark-mode-button" style="margin-left: 15px; cursor: pointer;">darkmode</label>
+<input type="checkbox" onclick="toggleDarkMode();" id="dark-mode-button" style="cursor: pointer" />
+`)
+ w.WriteString("<table>")
+ w.WriteString("<tr>")
+}
+
+func (w *HTMLWriter) Close() {
+ if w == nil {
+ return
+ }
+ io.WriteString(w.w, "</tr>")
+ io.WriteString(w.w, "</table>")
+ io.WriteString(w.w, "</body>")
+ io.WriteString(w.w, "</html>")
+ w.w.Close()
+ fmt.Printf("dumped SSA to %v\n", w.path)
+}
+
+// WritePhase writes f in a column headed by title.
+// phase is used for collapsing columns and should be unique across the table.
+func (w *HTMLWriter) WritePhase(phase, title string) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ hash := hashFunc(w.Func)
+ w.pendingPhases = append(w.pendingPhases, phase)
+ w.pendingTitles = append(w.pendingTitles, title)
+ if !bytes.Equal(hash, w.prevHash) {
+ w.flushPhases()
+ }
+ w.prevHash = hash
+}
+
+// flushPhases collects any pending phases and titles, writes them to the html, and resets the pending slices.
+func (w *HTMLWriter) flushPhases() {
+ phaseLen := len(w.pendingPhases)
+ if phaseLen == 0 {
+ return
+ }
+ phases := strings.Join(w.pendingPhases, " + ")
+ w.WriteMultiTitleColumn(
+ phases,
+ w.pendingTitles,
+ fmt.Sprintf("hash-%x", w.prevHash),
+ w.Func.HTML(w.pendingPhases[phaseLen-1], w.dot),
+ )
+ w.pendingPhases = w.pendingPhases[:0]
+ w.pendingTitles = w.pendingTitles[:0]
+}
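+
+// Illustrative sketch of the collapsing behavior (phase names here are only
+// examples): a phase that leaves the function unchanged keeps its entry
+// pending and is emitted together with the next phase whose hash differs,
+// as a single column titled with the joined phase names.
+//
+//	w.WritePhase("start", "start")       // hash differs from the zero value: flushed as "start"
+//	w.WritePhase("deadcode", "deadcode") // f unchanged: stays pending
+//	w.WritePhase("opt", "opt")           // hash changed: flushed as "deadcode + opt"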
+
+// FuncLines contains source code for a function to be displayed
+// in the sources column.
+type FuncLines struct {
+ Filename string
+ StartLineno uint
+ Lines []string
+}
+
+// ByTopo sorts topologically: target function is on top,
+// followed by inlined functions sorted by filename and line numbers.
+type ByTopo []*FuncLines
+
+func (x ByTopo) Len() int { return len(x) }
+func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByTopo) Less(i, j int) bool {
+ a := x[i]
+ b := x[j]
+ if a.Filename == b.Filename {
+ return a.StartLineno < b.StartLineno
+ }
+ return a.Filename < b.Filename
+}
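+
+// Minimal sketch of feeding the sources column (illustrative), assuming the
+// caller has already collected one *FuncLines per contributing function:
+//
+//	var all []*FuncLines
+//	// ... append the target function's lines and those of its inlined callees ...
+//	sort.Sort(ByTopo(all))
+//	w.WriteSources("sources", all)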
+
+// WriteSources writes the given source code lines in a column headed by phase.
+// phase is used for collapsing columns and should be unique across the table.
+func (w *HTMLWriter) WriteSources(phase string, all []*FuncLines) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "<div class=\"lines\" style=\"width: 8%\">")
+ filename := ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ filename = fl.Filename
+ }
+ for i := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, ln)
+ }
+ }
+ fmt.Fprint(&buf, "</div><div style=\"width: 92%\"><pre>")
+ filename = ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprintf(&buf, "<div><strong>%v</strong></div>", fl.Filename)
+ filename = fl.Filename
+ }
+ for i, line := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ var escaped string
+ if strings.TrimSpace(line) == "" {
+ escaped = "&nbsp;"
+ } else {
+ escaped = html.EscapeString(line)
+ }
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, escaped)
+ }
+ }
+ fmt.Fprint(&buf, "</pre></div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", buf.String())
+}
+
+func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ lines := strings.Split(buf.String(), "\n")
+ var out bytes.Buffer
+
+ fmt.Fprint(&out, "<div>")
+ for _, l := range lines {
+ l = strings.TrimSpace(l)
+ var escaped string
+ var lineNo string
+ if l == "" {
+ escaped = "&nbsp;"
+ } else {
+ if strings.HasPrefix(l, "buildssa") {
+ escaped = fmt.Sprintf("<b>%v</b>", l)
+ } else {
+ // Parse the line number from the format l(123).
+ idx := strings.Index(l, " l(")
+ if idx != -1 {
+ subl := l[idx+3:]
+ idxEnd := strings.Index(subl, ")")
+ if idxEnd != -1 {
+ if _, err := strconv.Atoi(subl[:idxEnd]); err == nil {
+ lineNo = subl[:idxEnd]
+ }
+ }
+ }
+ escaped = html.EscapeString(l)
+ }
+ }
+ if lineNo != "" {
+ fmt.Fprintf(&out, "<div class=\"l%v line-number ast\">%v</div>", lineNo, escaped)
+ } else {
+ fmt.Fprintf(&out, "<div class=\"ast\">%v</div>", escaped)
+ }
+ }
+ fmt.Fprint(&out, "</div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", out.String())
+}
+
+// WriteColumn writes raw HTML in a column headed by title.
+// It is intended for pre- and post-compilation log output.
+func (w *HTMLWriter) WriteColumn(phase, title, class, html string) {
+ w.WriteMultiTitleColumn(phase, []string{title}, class, html)
+}
+
+func (w *HTMLWriter) WriteMultiTitleColumn(phase string, titles []string, class, html string) {
+ if w == nil {
+ return
+ }
+ id := strings.Replace(phase, " ", "-", -1)
+ // collapsed column
+ w.Printf("<td id=\"%v-col\" class=\"collapsed\"><div>%v</div></td>", id, phase)
+
+ if class == "" {
+ w.Printf("<td id=\"%v-exp\">", id)
+ } else {
+ w.Printf("<td id=\"%v-exp\" class=\"%v\">", id, class)
+ }
+ for _, title := range titles {
+ w.WriteString("<h2>" + title + "</h2>")
+ }
+ w.WriteString(html)
+ w.WriteString("</td>\n")
+}
+
+func (w *HTMLWriter) Printf(msg string, v ...interface{}) {
+ if _, err := fmt.Fprintf(w.w, msg, v...); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (w *HTMLWriter) WriteString(s string) {
+ if _, err := io.WriteString(w.w, s); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (v *Value) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := v.String()
+ return fmt.Sprintf("<span class=\"%s ssa-value\">%s</span>", s, s)
+}
+
+func (v *Value) LongHTML() string {
+ // TODO: Any intra-value formatting?
+ // I'm wary of adding too much visual noise,
+ // but a little bit might be valuable.
+ // We already have visual noise in the form of punctuation
+ // maybe we could replace some of that with formatting.
+ s := fmt.Sprintf("<span class=\"%s ssa-long-value\">", v.String())
+
+ linenumber := "<span class=\"no-line-number\">(?)</span>"
+ if v.Pos.IsKnown() {
+ linenumber = fmt.Sprintf("<span class=\"l%v line-number\">(%s)</span>", v.Pos.LineNumber(), v.Pos.LineNumberHTML())
+ }
+
+ s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String())
+
+ s += " &lt;" + html.EscapeString(v.Type.String()) + "&gt;"
+ s += html.EscapeString(v.auxString())
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %s", a.HTML())
+ }
+ r := v.Block.Func.RegAlloc
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + html.EscapeString(r[v.ID].String())
+ }
+ var names []string
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ if len(names) != 0 {
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+
+ s += "</span>"
+ return s
+}
+
+func (b *Block) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := html.EscapeString(b.String())
+ return fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", s, s)
+}
+
+func (b *Block) LongHTML() string {
+ // TODO: improve this for HTML?
+ s := fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", html.EscapeString(b.String()), html.EscapeString(b.Kind.String()))
+ if b.Aux != nil {
+ s += html.EscapeString(fmt.Sprintf(" {%v}", b.Aux))
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += html.EscapeString(fmt.Sprintf(" [%v]", t))
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c.HTML())
+ }
+ if len(b.Succs) > 0 {
+ s += " &#8594;" // right arrow
+ for _, e := range b.Succs {
+ c := e.b
+ s += " " + c.HTML()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ if b.Pos.IsKnown() {
+ // TODO does not begin to deal with the full complexity of line numbers.
+ // Maybe we want a string/slice instead, of outer-inner when inlining.
+ s += fmt.Sprintf(" <span class=\"l%v line-number\">(%s)</span>", b.Pos.LineNumber(), b.Pos.LineNumberHTML())
+ }
+ return s
+}
+
+func (f *Func) HTML(phase string, dot *dotWriter) string {
+ buf := new(bytes.Buffer)
+ if dot != nil {
+ dot.writeFuncSVG(buf, phase, f)
+ }
+ fmt.Fprint(buf, "<code>")
+ p := htmlFuncPrinter{w: buf}
+ fprintFunc(p, f)
+
+ // fprintFunc(&buf, f) // TODO: HTML, not text, <br /> for line breaks, etc.
+ fmt.Fprint(buf, "</code>")
+ return buf.String()
+}
+
+func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) {
+ if d.broken {
+ return
+ }
+ if _, ok := d.phases[phase]; !ok {
+ return
+ }
+ cmd := exec.Command(d.path, "-Tsvg")
+ pipe, err := cmd.StdinPipe()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ buf := new(bytes.Buffer)
+ cmd.Stdout = buf
+ bufErr := new(bytes.Buffer)
+ cmd.Stderr = bufErr
+ err = cmd.Start()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ fmt.Fprint(pipe, `digraph "" { margin=0; ranksep=.2; `)
+ id := strings.Replace(phase, " ", "-", -1)
+ fmt.Fprintf(pipe, `id="g_graph_%s";`, id)
+ fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`)
+ fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`)
+ for i, b := range f.Blocks {
+ if b.Kind == BlockInvalid {
+ continue
+ }
+ layout := ""
+ if f.laidout {
+ layout = fmt.Sprintf(" #%d", i)
+ }
+ fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v",tooltip="%v"];`, b, b, layout, b.Kind.String(), id, b, b.LongString())
+ }
+ indexOf := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ indexOf[b.ID] = i
+ }
+ layoutDrawn := make([]bool, f.NumBlocks())
+
+ ponums := make([]int32, f.NumBlocks())
+ _ = postorderWithNumbering(f, ponums)
+ isBackEdge := func(from, to ID) bool {
+ return ponums[from] <= ponums[to]
+ }
+
+ for _, b := range f.Blocks {
+ for i, s := range b.Succs {
+ style := "solid"
+ color := "black"
+ arrow := "vee"
+ if b.unlikelyIndex() == i {
+ style = "dashed"
+ }
+ if f.laidout && indexOf[s.b.ID] == indexOf[b.ID]+1 {
+				// An edge that follows the block layout order gets a dotted arrowhead; this takes precedence over the back-edge color.
+ arrow = "dotvee"
+ layoutDrawn[s.b.ID] = true
+ } else if isBackEdge(b.ID, s.b.ID) {
+ color = "#2893ff"
+ }
+ fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow)
+ }
+ }
+ if f.laidout {
+ fmt.Fprintln(pipe, `edge[constraint=false,color=gray,style=solid,arrowhead=dot];`)
+ colors := [...]string{"#eea24f", "#f38385", "#f4d164", "#ca89fc", "gray"}
+ ci := 0
+ for i := 1; i < len(f.Blocks); i++ {
+ if layoutDrawn[f.Blocks[i].ID] {
+ continue
+ }
+ fmt.Fprintf(pipe, `%s -> %s [color="%s"];`, f.Blocks[i-1], f.Blocks[i], colors[ci])
+ ci = (ci + 1) % len(colors)
+ }
+ }
+ fmt.Fprint(pipe, "}")
+ pipe.Close()
+ err = cmd.Wait()
+ if err != nil {
+ d.broken = true
+ fmt.Printf("dot: %v\n%v\n", err, bufErr.String())
+ return
+ }
+
+ svgID := "svg_graph_" + id
+ fmt.Fprintf(w, `<div class="zoom"><button onclick="return graphReduce('%s');">-</button> <button onclick="return graphEnlarge('%s');">+</button></div>`, svgID, svgID)
+ // For now, an awful hack: edit the html as it passes through
+ // our fingers, finding '<svg ' and injecting needed attributes after it.
+ err = d.copyUntil(w, buf, `<svg `)
+ if err != nil {
+ fmt.Printf("injecting attributes: %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, ` id="%s" onload="makeDraggable(evt)" `, svgID)
+ io.Copy(w, buf)
+}
+
+func (b *Block) unlikelyIndex() int {
+ switch b.Likely {
+ case BranchLikely:
+ return 1
+ case BranchUnlikely:
+ return 0
+ }
+ return -1
+}
+
+func (d *dotWriter) copyUntil(w io.Writer, buf *bytes.Buffer, sep string) error {
+ i := bytes.Index(buf.Bytes(), []byte(sep))
+ if i == -1 {
+ return fmt.Errorf("couldn't find dot sep %q", sep)
+ }
+ _, err := io.CopyN(w, buf, int64(i+len(sep)))
+ return err
+}
+
+type htmlFuncPrinter struct {
+ w io.Writer
+}
+
+func (p htmlFuncPrinter) header(f *Func) {}
+
+func (p htmlFuncPrinter) startBlock(b *Block, reachable bool) {
+ var dead string
+ if !reachable {
+ dead = "dead-block"
+ }
+ fmt.Fprintf(p.w, "<ul class=\"%s ssa-print-func %s\">", b, dead)
+ fmt.Fprintf(p.w, "<li class=\"ssa-start-block\">%s:", b.HTML())
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " &#8592;") // left arrow
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " %s", pred.HTML())
+ }
+ }
+ if len(b.Values) > 0 {
+ io.WriteString(p.w, `<button onclick="hideBlock(this)">-</button>`)
+ }
+ io.WriteString(p.w, "</li>")
+ if len(b.Values) > 0 { // start list of values
+ io.WriteString(p.w, "<li class=\"ssa-value-list\">")
+ io.WriteString(p.w, "<ul>")
+ }
+}
+
+func (p htmlFuncPrinter) endBlock(b *Block) {
+ if len(b.Values) > 0 { // end list of values
+ io.WriteString(p.w, "</ul>")
+ io.WriteString(p.w, "</li>")
+ }
+ io.WriteString(p.w, "<li class=\"ssa-end-block\">")
+ fmt.Fprint(p.w, b.LongHTML())
+ io.WriteString(p.w, "</li>")
+ io.WriteString(p.w, "</ul>")
+}
+
+func (p htmlFuncPrinter) value(v *Value, live bool) {
+ var dead string
+ if !live {
+ dead = "dead-value"
+ }
+ fmt.Fprintf(p.w, "<li class=\"ssa-long-value %s\">", dead)
+ fmt.Fprint(p.w, v.LongHTML())
+ io.WriteString(p.w, "</li>")
+}
+
+func (p htmlFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "<span class=\"depcycle\">")
+}
+
+func (p htmlFuncPrinter) endDepCycle() {
+ fmt.Fprintln(p.w, "</span>")
+}
+
+func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "<li>name %s: ", n)
+ for _, val := range vals {
+ fmt.Fprintf(p.w, "%s ", val.HTML())
+ }
+ fmt.Fprintf(p.w, "</li>")
+}
+
+type dotWriter struct {
+ path string
+ broken bool
+ phases map[string]bool // keys specify phases with CFGs
+}
+
+// newDotWriter returns a non-nil value when mask is valid.
+// dotWriter will generate SVGs only for the phases specified in the mask.
+// mask can contain the following patterns and combinations of them:
+// * - all of them;
+// x-y - x through y, inclusive;
+// x,y - x and y, but not the passes between.
+func newDotWriter(mask string) *dotWriter {
+ if mask == "" {
+ return nil
+ }
+ // User can specify phase name with _ instead of spaces.
+ mask = strings.Replace(mask, "_", " ", -1)
+ ph := make(map[string]bool)
+ ranges := strings.Split(mask, ",")
+ for _, r := range ranges {
+ spl := strings.Split(r, "-")
+ if len(spl) > 2 {
+ fmt.Printf("range is not valid: %v\n", mask)
+ return nil
+ }
+ var first, last int
+ if mask == "*" {
+ first = 0
+ last = len(passes) - 1
+ } else {
+ first = passIdxByName(spl[0])
+ last = passIdxByName(spl[len(spl)-1])
+ }
+ if first < 0 || last < 0 || first > last {
+ fmt.Printf("range is not valid: %v\n", r)
+ return nil
+ }
+ for p := first; p <= last; p++ {
+ ph[passes[p].name] = true
+ }
+ }
+
+ path, err := exec.LookPath("dot")
+ if err != nil {
+ fmt.Println(err)
+ return nil
+ }
+ return &dotWriter{path: path, phases: ph}
+}
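+
+// Example masks accepted above (pass names shown are only examples):
+//
+//	newDotWriter("*")                // CFG SVGs for every pass
+//	newDotWriter("opt-lower")        // every pass from opt through lower, inclusive
+//	newDotWriter("number_lines,opt") // exactly these two passes; underscores stand for spaces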
+
+func passIdxByName(name string) int {
+ for i, p := range passes {
+ if p.name == name {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/src/cmd/compile/internal/ssa/id.go b/src/cmd/compile/internal/ssa/id.go
new file mode 100644
index 0000000..725279e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/id.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+type ID int32
+
+// idAlloc provides an allocator for unique integers.
+type idAlloc struct {
+ last ID
+}
+
+// get allocates an ID and returns it. IDs are always > 0.
+func (a *idAlloc) get() ID {
+ x := a.last
+ x++
+ if x == 1<<31-1 {
+ panic("too many ids for this function")
+ }
+ a.last = x
+ return x
+}
+
+// num returns the maximum ID ever returned + 1.
+func (a *idAlloc) num() int {
+ return int(a.last + 1)
+}
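+
+// Minimal usage sketch (not part of this file): a Func would typically own
+// one idAlloc per ID space and draw fresh IDs from it for each new Value or
+// Block.
+//
+//	var alloc idAlloc
+//	a := alloc.get() // 1
+//	b := alloc.get() // 2
+//	n := alloc.num() // 3, one more than the largest ID handed out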
diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go
new file mode 100644
index 0000000..30b7b97
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/layout.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// layout orders basic blocks in f with the goal of minimizing control flow instructions.
+// After this phase returns, the order of f.Blocks matters and is the order
+// in which those blocks will appear in the assembly output.
+func layout(f *Func) {
+ f.Blocks = layoutOrder(f)
+}
+
+// layoutRegallocOrder returns the block order used by register allocation,
+// which has constraints imposed by the linear-scan algorithm. Note that
+// f.pass here is regalloc, so the switch is conditional on -d=ssa/regalloc/test=N.
+func layoutRegallocOrder(f *Func) []*Block {
+
+ switch f.pass.test {
+ case 0: // layout order
+ return layoutOrder(f)
+ case 1: // existing block order
+ return f.Blocks
+ case 2: // reverse of postorder; legal, but usually not good.
+ po := f.postorder()
+ visitOrder := make([]*Block, len(po))
+ for i, b := range po {
+ j := len(po) - i - 1
+ visitOrder[j] = b
+ }
+ return visitOrder
+ }
+
+ return nil
+}
+
+func layoutOrder(f *Func) []*Block {
+ order := make([]*Block, 0, f.NumBlocks())
+ scheduled := make([]bool, f.NumBlocks())
+ idToBlock := make([]*Block, f.NumBlocks())
+ indegree := make([]int, f.NumBlocks())
+ posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
+ defer f.retSparseSet(posdegree)
+ zerodegree := f.newSparseSet(f.NumBlocks()) // blocks with zero remaining degree
+ defer f.retSparseSet(zerodegree)
+ exit := f.newSparseSet(f.NumBlocks()) // exit blocks
+ defer f.retSparseSet(exit)
+
+ // Populate idToBlock and find exit blocks.
+ for _, b := range f.Blocks {
+ idToBlock[b.ID] = b
+ if b.Kind == BlockExit {
+ exit.add(b.ID)
+ }
+ }
+
+ // Expand exit to include blocks post-dominated by exit blocks.
+ for {
+ changed := false
+ for _, id := range exit.contents() {
+ b := idToBlock[id]
+ NextPred:
+ for _, pe := range b.Preds {
+ p := pe.b
+ if exit.contains(p.ID) {
+ continue
+ }
+ for _, s := range p.Succs {
+ if !exit.contains(s.b.ID) {
+ continue NextPred
+ }
+ }
+ // All Succs are in exit; add p.
+ exit.add(p.ID)
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Initialize indegree of each block
+ for _, b := range f.Blocks {
+ if exit.contains(b.ID) {
+ // exit blocks are always scheduled last
+ continue
+ }
+ indegree[b.ID] = len(b.Preds)
+ if len(b.Preds) == 0 {
+ zerodegree.add(b.ID)
+ } else {
+ posdegree.add(b.ID)
+ }
+ }
+
+ bid := f.Entry.ID
+blockloop:
+ for {
+ // add block to schedule
+ b := idToBlock[bid]
+ order = append(order, b)
+ scheduled[bid] = true
+ if len(order) == len(f.Blocks) {
+ break
+ }
+
+ for _, e := range b.Succs {
+ c := e.b
+ indegree[c.ID]--
+ if indegree[c.ID] == 0 {
+ posdegree.remove(c.ID)
+ zerodegree.add(c.ID)
+ }
+ }
+
+		// Pick the next block to schedule from among the successor
+		// blocks that have not been scheduled yet.
+
+ // Use likely direction if we have it.
+ var likely *Block
+ switch b.Likely {
+ case BranchLikely:
+ likely = b.Succs[0].b
+ case BranchUnlikely:
+ likely = b.Succs[1].b
+ }
+ if likely != nil && !scheduled[likely.ID] {
+ bid = likely.ID
+ continue
+ }
+
+ // Use degree for now.
+ bid = 0
+ mindegree := f.NumBlocks()
+ for _, e := range order[len(order)-1].Succs {
+ c := e.b
+ if scheduled[c.ID] || c.Kind == BlockExit {
+ continue
+ }
+ if indegree[c.ID] < mindegree {
+ mindegree = indegree[c.ID]
+ bid = c.ID
+ }
+ }
+ if bid != 0 {
+ continue
+ }
+ // TODO: improve this part
+ // No successor of the previously scheduled block works.
+ // Pick a zero-degree block if we can.
+ for zerodegree.size() > 0 {
+ cid := zerodegree.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ // Still nothing, pick any non-exit block.
+ for posdegree.size() > 0 {
+ cid := posdegree.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ // Pick any exit block.
+ // TODO: Order these to minimize jump distances?
+ for {
+ cid := exit.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ }
+ f.laidout = true
+ return order
+ //f.Blocks = order
+}
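+
+// Sketch of the policy above on a tiny CFG (illustrative):
+//
+//	b1: If c  -> b2 (likely), b3
+//	b2: Plain -> b4
+//	b3: Exit
+//	b4: Ret
+//
+// layoutOrder schedules b1 first, then b2 (the likely successor falls
+// through), then b4 (b2's only successor, whose remaining indegree is now
+// zero), and finally the exit block b3, which is always deferred to the end.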
diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go
new file mode 100644
index 0000000..5cb7391
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca.go
@@ -0,0 +1,123 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Code to compute lowest common ancestors in the dominator tree.
+// https://en.wikipedia.org/wiki/Lowest_common_ancestor
+// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space
+
+// lcaRange is a data structure that can compute lowest common ancestor queries
+// in O(n lg n) precomputed space and O(1) time per query.
+type lcaRange struct {
+ // Additional information about each block (indexed by block ID).
+ blocks []lcaRangeBlock
+
+ // Data structure for range minimum queries.
+ // rangeMin[k][i] contains the ID of the minimum depth block
+ // in the Euler tour from positions i to i+1<<k-1, inclusive.
+ rangeMin [][]ID
+}
+
+type lcaRangeBlock struct {
+ b *Block
+ parent ID // parent in dominator tree. 0 = no parent (entry or unreachable)
+ firstChild ID // first child in dominator tree
+ sibling ID // next child of parent
+ pos int32 // an index in the Euler tour where this block appears (any one of its occurrences)
+ depth int32 // depth in dominator tree (root=0, its children=1, etc.)
+}
+
+func makeLCArange(f *Func) *lcaRange {
+ dom := f.Idom()
+
+ // Build tree
+ blocks := make([]lcaRangeBlock, f.NumBlocks())
+ for _, b := range f.Blocks {
+ blocks[b.ID].b = b
+ if dom[b.ID] == nil {
+ continue // entry or unreachable
+ }
+ parent := dom[b.ID].ID
+ blocks[b.ID].parent = parent
+ blocks[b.ID].sibling = blocks[parent].firstChild
+ blocks[parent].firstChild = b.ID
+ }
+
+	// Compute Euler tour ordering.
+ // Each reachable block will appear #children+1 times in the tour.
+ tour := make([]ID, 0, f.NumBlocks()*2-1)
+ type queueEntry struct {
+ bid ID // block to work on
+ cid ID // child we're already working on (0 = haven't started yet)
+ }
+ q := []queueEntry{{f.Entry.ID, 0}}
+ for len(q) > 0 {
+ n := len(q) - 1
+ bid := q[n].bid
+ cid := q[n].cid
+ q = q[:n]
+
+ // Add block to tour.
+ blocks[bid].pos = int32(len(tour))
+ tour = append(tour, bid)
+
+ // Proceed down next child edge (if any).
+ if cid == 0 {
+ // This is our first visit to b. Set its depth.
+ blocks[bid].depth = blocks[blocks[bid].parent].depth + 1
+ // Then explore its first child.
+ cid = blocks[bid].firstChild
+ } else {
+ // We've seen b before. Explore the next child.
+ cid = blocks[cid].sibling
+ }
+ if cid != 0 {
+ q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0})
+ }
+ }
+
+ // Compute fast range-minimum query data structure
+ var rangeMin [][]ID
+ rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself.
+ for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 {
+ r := make([]ID, len(tour)-s+1)
+ for i := 0; i < len(tour)-s+1; i++ {
+ bid := rangeMin[logS-1][i]
+ bid2 := rangeMin[logS-1][i+s/2]
+ if blocks[bid2].depth < blocks[bid].depth {
+ bid = bid2
+ }
+ r[i] = bid
+ }
+ rangeMin = append(rangeMin, r)
+ }
+
+ return &lcaRange{blocks: blocks, rangeMin: rangeMin}
+}
+
+// find returns the lowest common ancestor of a and b.
+func (lca *lcaRange) find(a, b *Block) *Block {
+ if a == b {
+ return a
+ }
+	// Find the positions of a and b in the Euler tour.
+ p1 := lca.blocks[a.ID].pos
+ p2 := lca.blocks[b.ID].pos
+ if p1 > p2 {
+ p1, p2 = p2, p1
+ }
+
+ // The lowest common ancestor is the minimum depth block
+ // on the tour from p1 to p2. We've precomputed minimum
+ // depth blocks for powers-of-two subsequences of the tour.
+ // Combine the right two precomputed values to get the answer.
+ logS := uint(log64(int64(p2 - p1)))
+ bid1 := lca.rangeMin[logS][p1]
+ bid2 := lca.rangeMin[logS][p2-1<<logS+1]
+ if lca.blocks[bid1].depth < lca.blocks[bid2].depth {
+ return lca.blocks[bid1].b
+ }
+ return lca.blocks[bid2].b
+}
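+
+// Worked example of the two-window lookup in find (illustrative): for tour
+// positions p1=3 and p2=9 the gap is 6, so logS = 2 and the window size is
+// 1<<logS = 4. The two windows start at p1 = 3 and at p2-1<<logS+1 = 6,
+// covering tour positions [3,6] and [6,9]; together they cover [3,9] with
+// overlap, so the shallower of rangeMin[2][3] and rangeMin[2][6] is the
+// minimum-depth block on the whole segment, i.e. the lowest common ancestor.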
diff --git a/src/cmd/compile/internal/ssa/lca_test.go b/src/cmd/compile/internal/ssa/lca_test.go
new file mode 100644
index 0000000..8c8920c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca_test.go
@@ -0,0 +1,88 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+func testLCAgen(t *testing.T, bg blockGen, size int) {
+ c := testConfig(t)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ if size == 4 {
+ t.Logf(fun.f.String())
+ }
+ lca1 := makeLCArange(fun.f)
+ lca2 := makeLCAeasy(fun.f)
+ for _, b := range fun.f.Blocks {
+ for _, c := range fun.f.Blocks {
+ l1 := lca1.find(b, c)
+ l2 := lca2.find(b, c)
+ if l1 != l2 {
+ t.Errorf("lca(%s,%s)=%s, want %s", b, c, l1, l2)
+ }
+ }
+ }
+}
+
+func TestLCALinear(t *testing.T) {
+ testLCAgen(t, genLinear, 10)
+ testLCAgen(t, genLinear, 100)
+}
+
+func TestLCAFwdBack(t *testing.T) {
+ testLCAgen(t, genFwdBack, 10)
+ testLCAgen(t, genFwdBack, 100)
+}
+
+func TestLCAManyPred(t *testing.T) {
+ testLCAgen(t, genManyPred, 10)
+ testLCAgen(t, genManyPred, 100)
+}
+
+func TestLCAMaxPred(t *testing.T) {
+ testLCAgen(t, genMaxPred, 10)
+ testLCAgen(t, genMaxPred, 100)
+}
+
+func TestLCAMaxPredValue(t *testing.T) {
+ testLCAgen(t, genMaxPredValue, 10)
+ testLCAgen(t, genMaxPredValue, 100)
+}
+
+// Simple implementation of LCA to compare against.
+type lcaEasy struct {
+ parent []*Block
+}
+
+func makeLCAeasy(f *Func) *lcaEasy {
+ return &lcaEasy{parent: dominators(f)}
+}
+
+func (lca *lcaEasy) find(a, b *Block) *Block {
+ da := lca.depth(a)
+ db := lca.depth(b)
+ for da > db {
+ da--
+ a = lca.parent[a.ID]
+ }
+ for da < db {
+ db--
+ b = lca.parent[b.ID]
+ }
+ for a != b {
+ a = lca.parent[a.ID]
+ b = lca.parent[b.ID]
+ }
+ return a
+}
+
+func (lca *lcaEasy) depth(b *Block) int {
+ n := 0
+ for b != nil {
+ b = lca.parent[b.ID]
+ n++
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
new file mode 100644
index 0000000..49898a1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -0,0 +1,575 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
+type loop struct {
+ header *Block // The header node of this (reducible) loop
+ outer *loop // loop containing this loop
+
+ // By default, children, exits, and depth are not initialized.
+ children []*loop // loops nested directly within this loop. Initialized by assembleChildren().
+ exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits().
+
+ // Next three fields used by regalloc and/or
+ // aid in computation of inner-ness and list of blocks.
+ nBlocks int32 // Number of blocks in this loop but not within inner loops
+ depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths().
+ isInner bool // True if never discovered to contain a loop
+
+ // register allocation uses this.
+ containsUnavoidableCall bool // True if all paths through the loop have a call
+}
+
+// outerinner records that outer contains inner
+func (sdom SparseTree) outerinner(outer, inner *loop) {
+	// There could be other outer loops found in some random order;
+	// locate the new outer loop appropriately among them.
+
+ // Outer loop headers dominate inner loop headers.
+ // Use this to put the "new" "outer" loop in the right place.
+ oldouter := inner.outer
+ for oldouter != nil && sdom.isAncestor(outer.header, oldouter.header) {
+ inner = oldouter
+ oldouter = inner.outer
+ }
+ if outer == oldouter {
+ return
+ }
+ if oldouter != nil {
+ sdom.outerinner(oldouter, outer)
+ }
+
+ inner.outer = outer
+ outer.isInner = false
+}
+
+func checkContainsCall(bb *Block) bool {
+ if bb.Kind == BlockDefer {
+ return true
+ }
+ for _, v := range bb.Values {
+ if opcodeTable[v.Op].call {
+ return true
+ }
+ }
+ return false
+}
+
+type loopnest struct {
+ f *Func
+ b2l []*loop
+ po []*Block
+ sdom SparseTree
+ loops []*loop
+	hasIrreducible bool // TODO: current treatment of irreducible loops is very flaky; if accurate loops are needed, we must punt at the function level.
+
+ // Record which of the lazily initialized fields have actually been initialized.
+ initializedChildren, initializedDepth, initializedExits bool
+}
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+const (
+ blDEFAULT = 0
+ blMin = blDEFAULT
+ blCALL = 1
+ blRET = 2
+ blEXIT = 3
+)
+
+var bllikelies = [4]string{"default", "call", "ret", "exit"}
+
+func describePredictionAgrees(b *Block, prediction BranchPrediction) string {
+ s := ""
+ if prediction == b.Likely {
+ s = " (agrees with previous)"
+ } else if b.Likely != BranchUnknown {
+ s = " (disagrees with previous, ignored)"
+ }
+ return s
+}
+
+func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction BranchPrediction) {
+ f.Warnl(b.Pos, "Branch prediction rule %s < %s%s",
+ bllikelies[likely-blMin], bllikelies[not-blMin], describePredictionAgrees(b, prediction))
+}
+
+func likelyadjust(f *Func) {
+ // The values assigned to certain and local only matter
+ // in their rank order. 0 is default, more positive
+ // is less likely. It's possible to assign a negative
+ // unlikeliness (though not currently the case).
+ certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
+ local := make([]int8, f.NumBlocks()) // for our immediate predecessors.
+
+ po := f.postorder()
+ nest := f.loopnest()
+ b2l := nest.b2l
+
+ for _, b := range po {
+ switch b.Kind {
+ case BlockExit:
+ // Very unlikely.
+ local[b.ID] = blEXIT
+ certain[b.ID] = blEXIT
+
+ // Ret, it depends.
+ case BlockRet, BlockRetJmp:
+ local[b.ID] = blRET
+ certain[b.ID] = blRET
+
+ // Calls. TODO not all calls are equal, names give useful clues.
+ // Any name-based heuristics are only relative to other calls,
+ // and less influential than inferences from loop structure.
+ case BlockDefer:
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+
+ default:
+ if len(b.Succs) == 1 {
+ certain[b.ID] = certain[b.Succs[0].b.ID]
+ } else if len(b.Succs) == 2 {
+ // If successor is an unvisited backedge, it's in loop and we don't care.
+ // Its default unlikely is also zero which is consistent with favoring loop edges.
+ // Notice that this can act like a "reset" on unlikeliness at loops; the
+ // default "everything returns" unlikeliness is erased by min with the
+ // backedge likeliness; however a loop with calls on every path will be
+ // tagged with call cost. Net effect is that loop entry is favored.
+ b0 := b.Succs[0].b.ID
+ b1 := b.Succs[1].b.ID
+ certain[b.ID] = min8(certain[b0], certain[b1])
+
+ l := b2l[b.ID]
+ l0 := b2l[b0]
+ l1 := b2l[b1]
+
+ prediction := b.Likely
+ // Weak loop heuristic -- both source and at least one dest are in loops,
+ // and there is a difference in the destinations.
+ // TODO what is best arrangement for nested loops?
+ if l != nil && l0 != l1 {
+ noprediction := false
+ switch {
+ // prefer not to exit loops
+ case l1 == nil:
+ prediction = BranchLikely
+ case l0 == nil:
+ prediction = BranchUnlikely
+
+ // prefer to stay in loop, not exit to outer.
+ case l == l0:
+ prediction = BranchLikely
+ case l == l1:
+ prediction = BranchUnlikely
+ default:
+ noprediction = true
+ }
+ if f.pass.debug > 0 && !noprediction {
+ f.Warnl(b.Pos, "Branch prediction rule stay in loop%s",
+ describePredictionAgrees(b, prediction))
+ }
+
+ } else {
+ // Lacking loop structure, fall back on heuristics.
+ if certain[b1] > certain[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b0], certain[b1], prediction)
+ }
+ } else if certain[b0] > certain[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b1], certain[b0], prediction)
+ }
+ } else if local[b1] > local[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b0], local[b1], prediction)
+ }
+ } else if local[b0] > local[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b1], local[b0], prediction)
+ }
+ }
+ }
+ if b.Likely != prediction {
+ if b.Likely == BranchUnknown {
+ b.Likely = prediction
+ }
+ }
+ }
+ // Look for calls in the block. If there is one, make this block unlikely.
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].call {
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+ }
+ }
+ }
+ if f.pass.debug > 2 {
+ f.Warnl(b.Pos, "BP: Block %s, local=%s, certain=%s", b, bllikelies[local[b.ID]-blMin], bllikelies[certain[b.ID]-blMin])
+ }
+
+ }
+}
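+
+// Concrete instance of the non-loop heuristics above (illustrative, and
+// assuming the error arm is Succs[0]):
+//
+//	if err != nil {
+//		return err
+//	}
+//	// ... more work ...
+//
+// The error arm flows to a Ret block, so its certain[] score is blRET, while
+// the fall-through arm's is blDEFAULT; certain[b0] > certain[b1] therefore
+// yields BranchUnlikely, marking the error arm as the unlikely successor.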
+
+func (l *loop) String() string {
+ return fmt.Sprintf("hdr:%s", l.header)
+}
+
+func (l *loop) LongString() string {
+ i := ""
+ o := ""
+ if l.isInner {
+ i = ", INNER"
+ }
+ if l.outer != nil {
+ o = ", o=" + l.outer.header.String()
+ }
+ return fmt.Sprintf("hdr:%s%s%s", l.header, i, o)
+}
+
+func (l *loop) isWithinOrEq(ll *loop) bool {
+ if ll == nil { // nil means whole program
+ return true
+ }
+ for ; l != nil; l = l.outer {
+ if l == ll {
+ return true
+ }
+ }
+ return false
+}
+
+// nearestOuterLoop returns the outer loop of loop most nearly
+// containing block b; the header must dominate b. loop itself
+// is assumed to not be that loop. For acceptable performance,
+// we're relying on loop nests to not be terribly deep.
+func (l *loop) nearestOuterLoop(sdom SparseTree, b *Block) *loop {
+ var o *loop
+ for o = l.outer; o != nil && !sdom.IsAncestorEq(o.header, b); o = o.outer {
+ }
+ return o
+}
+
+func loopnestfor(f *Func) *loopnest {
+ po := f.postorder()
+ sdom := f.Sdom()
+ b2l := make([]*loop, f.NumBlocks())
+ loops := make([]*loop, 0)
+ visited := make([]bool, f.NumBlocks())
+ sawIrred := false
+
+ if f.pass.debug > 2 {
+ fmt.Printf("loop finding in %s\n", f.Name)
+ }
+
+ // Reducible-loop-nest-finding.
+ for _, b := range po {
+ if f.pass != nil && f.pass.debug > 3 {
+ fmt.Printf("loop finding at %s\n", b)
+ }
+
+ var innermost *loop // innermost header reachable from this block
+
+ // IF any successor s of b is in a loop headed by h
+ // AND h dominates b
+ // THEN b is in the loop headed by h.
+ //
+ // Choose the first/innermost such h.
+ //
+ // IF s itself dominates b, then s is a loop header;
+ // and there may be more than one such s.
+ // Since there's at most 2 successors, the inner/outer ordering
+ // between them can be established with simple comparisons.
+ for _, e := range b.Succs {
+ bb := e.b
+ l := b2l[bb.ID]
+
+ if sdom.IsAncestorEq(bb, b) { // Found a loop header
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is header\n", bb.String(), b.String())
+ }
+ if l == nil {
+ l = &loop{header: bb, isInner: true}
+ loops = append(loops, l)
+ b2l[bb.ID] = l
+ }
+ } else if !visited[bb.ID] { // Found an irreducible loop
+ sawIrred = true
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is IRRED, in %s\n", bb.String(), b.String(), f.Name)
+ }
+ } else if l != nil {
+ // TODO handle case where l is irreducible.
+ // Perhaps a loop header is inherited.
+ // is there any loop containing our successor whose
+ // header dominates b?
+ if !sdom.IsAncestorEq(l.header, b) {
+ l = l.nearestOuterLoop(sdom, b)
+ }
+ if f.pass != nil && f.pass.debug > 4 {
+ if l == nil {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ } else {
+ fmt.Printf("loop finding succ %s of %s provides loop with header %s\n", bb.String(), b.String(), l.header.String())
+ }
+ }
+ } else { // No loop
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ }
+
+ }
+
+ if l == nil || innermost == l {
+ continue
+ }
+
+ if innermost == nil {
+ innermost = l
+ continue
+ }
+
+ if sdom.isAncestor(innermost.header, l.header) {
+ sdom.outerinner(innermost, l)
+ innermost = l
+ } else if sdom.isAncestor(l.header, innermost.header) {
+ sdom.outerinner(l, innermost)
+ }
+ }
+
+ if innermost != nil {
+ b2l[b.ID] = innermost
+ innermost.nBlocks++
+ }
+ visited[b.ID] = true
+ }
+
+ ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred}
+
+ // Calculate containsUnavoidableCall for regalloc
+ dominatedByCall := make([]bool, f.NumBlocks())
+ for _, b := range po {
+ if checkContainsCall(b) {
+ dominatedByCall[b.ID] = true
+ }
+ }
+	// Run a DFS to find a path through the loop that avoids all calls.
+	// Such a path either escapes the loop or returns back to the header.
+	// It isn't enough for the exit to not be dominated by any call; for example:
+ // ... some loop
+ // call1 call2
+ // \ /
+ // exit
+ // ...
+	// exit is not dominated by any call, but there is no call-free path to it.
+ for _, l := range loops {
+ // Header contains call.
+ if dominatedByCall[l.header.ID] {
+ l.containsUnavoidableCall = true
+ continue
+ }
+ callfreepath := false
+ tovisit := make([]*Block, 0, len(l.header.Succs))
+ // Push all non-loop non-exit successors of header onto toVisit.
+ for _, s := range l.header.Succs {
+ nb := s.Block()
+ // This corresponds to loop with zero iterations.
+			// This corresponds to a loop with zero iterations.
+ tovisit = append(tovisit, nb)
+ }
+ }
+ for len(tovisit) > 0 {
+ cur := tovisit[len(tovisit)-1]
+ tovisit = tovisit[:len(tovisit)-1]
+ if dominatedByCall[cur.ID] {
+ continue
+ }
+ // Record visited in dominatedByCall.
+ dominatedByCall[cur.ID] = true
+ for _, s := range cur.Succs {
+ nb := s.Block()
+ if l.iterationEnd(nb, b2l) {
+ callfreepath = true
+ }
+ if !dominatedByCall[nb.ID] {
+ tovisit = append(tovisit, nb)
+ }
+
+ }
+ if callfreepath {
+ break
+ }
+ }
+ if !callfreepath {
+ l.containsUnavoidableCall = true
+ }
+ }
+
+ // Curious about the loopiness? "-d=ssa/likelyadjust/stats"
+ if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 {
+ ln.assembleChildren()
+ ln.calculateDepths()
+ ln.findExits()
+
+ // Note stats for non-innermost loops are slightly flawed because
+ // they don't account for inner loop exits that span multiple levels.
+
+ for _, l := range loops {
+ x := len(l.exits)
+ cf := 0
+ if !l.containsUnavoidableCall {
+ cf = 1
+ }
+ inner := 0
+ if l.isInner {
+ inner++
+ }
+
+ f.LogStat("loopstats:",
+ l.depth, "depth", x, "exits",
+ inner, "is_inner", cf, "always_calls", l.nBlocks, "n_blocks")
+ }
+ }
+
+ if f.pass != nil && f.pass.debug > 1 && len(loops) > 0 {
+ fmt.Printf("Loops in %s:\n", f.Name)
+ for _, l := range loops {
+ fmt.Printf("%s, b=", l.LongString())
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == l {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ fmt.Printf("Nonloop blocks in %s:", f.Name)
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == nil {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ return ln
+}
+
+// assembleChildren initializes the children field of each
+// loop in the nest. Loop A is a child of loop B if A is
+// directly nested within B (based on the reducible-loops
+// detection above)
+func (ln *loopnest) assembleChildren() {
+ if ln.initializedChildren {
+ return
+ }
+ for _, l := range ln.loops {
+ if l.outer != nil {
+ l.outer.children = append(l.outer.children, l)
+ }
+ }
+ ln.initializedChildren = true
+}
+
+// calculateDepths uses the children field of loops
+// to determine the nesting depth (outer=1) of each
+// loop. This is helpful for finding exit edges.
+func (ln *loopnest) calculateDepths() {
+ if ln.initializedDepth {
+ return
+ }
+ ln.assembleChildren()
+ for _, l := range ln.loops {
+ if l.outer == nil {
+ l.setDepth(1)
+ }
+ }
+ ln.initializedDepth = true
+}
+
+// findExits uses loop depth information to find the
+// exits from a loop.
+func (ln *loopnest) findExits() {
+ if ln.initializedExits {
+ return
+ }
+ ln.calculateDepths()
+ b2l := ln.b2l
+ for _, b := range ln.po {
+ l := b2l[b.ID]
+ if l != nil && len(b.Succs) == 2 {
+ sl := b2l[b.Succs[0].b.ID]
+ if recordIfExit(l, sl, b.Succs[0].b) {
+ continue
+ }
+ sl = b2l[b.Succs[1].b.ID]
+ if recordIfExit(l, sl, b.Succs[1].b) {
+ continue
+ }
+ }
+ }
+ ln.initializedExits = true
+}
+
+// depth returns the loop nesting level of block b.
+func (ln *loopnest) depth(b ID) int16 {
+ if l := ln.b2l[b]; l != nil {
+ return l.depth
+ }
+ return 0
+}
+
+// recordIfExit checks sl (the loop containing b) to see if it
+// is outside of loop l, and if so, records b as an exit block
+// from l and returns true.
+func recordIfExit(l, sl *loop, b *Block) bool {
+ if sl != l {
+ if sl == nil || sl.depth <= l.depth {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ // sl is not nil, and is deeper than l
+ // it's possible for this to be a goto into an irreducible loop made from gotos.
+ for sl.depth > l.depth {
+ sl = sl.outer
+ }
+ if sl != l {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ }
+ return false
+}
+
+func (l *loop) setDepth(d int16) {
+ l.depth = d
+ for _, c := range l.children {
+ c.setDepth(d + 1)
+ }
+}
+
+// iterationEnd checks if block b ends an iteration of loop l.
+// Ending an iteration means either escaping to the outer loop/code or
+// going back to the header.
+func (l *loop) iterationEnd(b *Block, b2l []*loop) bool {
+ return b == l.header || b2l[b.ID] == nil || (b2l[b.ID] != l && b2l[b.ID].depth <= l.depth)
+}
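+
+// For example (illustrative): with an outer loop at depth 1 and an inner loop
+// l at depth 2, iterationEnd reports true for l's own header (the back edge),
+// for any block that belongs to no loop at all, and for any block mapped to
+// the outer loop (depth 1 <= 2); it reports false for the other blocks of l.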
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
new file mode 100644
index 0000000..a333982
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// A Location is a place that an ssa variable can reside.
+type Location interface {
+ String() string // name to use in assembly templates: AX, 16(SP), ...
+}
+
+// A Register is a machine register, like AX.
+// They are numbered densely from 0 (for each architecture).
+type Register struct {
+ num int32 // dense numbering
+ objNum int16 // register number from cmd/internal/obj/$ARCH
+ gcNum int16 // GC register map number (dense numbering of registers that can contain pointers)
+ name string
+}
+
+func (r *Register) String() string {
+ return r.name
+}
+
+// ObjNum returns the register number from cmd/internal/obj/$ARCH that
+// corresponds to this register.
+func (r *Register) ObjNum() int16 {
+ return r.objNum
+}
+
+// GCNum returns the runtime GC register index of r, or -1 if this
+// register can't contain pointers.
+func (r *Register) GCNum() int16 {
+ return r.gcNum
+}
+
+// A LocalSlot is a location in the stack frame, which identifies and stores
+// part or all of a PPARAM, PPARAMOUT, or PAUTO ONAME node.
+// It can represent a whole variable, part of a larger stack slot, or part of a
+// variable that has been decomposed into multiple stack slots.
+// As an example, a string could have the following configurations:
+//
+// stack layout LocalSlots
+//
+// Optimizations are disabled. s is on the stack and represented in its entirety.
+// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
+//
+// s was not decomposed, but the SSA operates on its parts individually, so
+// there is a LocalSlot for each of its fields that points into the single stack slot.
+// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
+//
+// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSlot.
+// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
+// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
+// parent = &{N: s, Type: string}
+type LocalSlot struct {
+ N GCNode // an ONAME *gc.Node representing a stack location.
+ Type *types.Type // type of slot
+ Off int64 // offset of slot in N
+
+ SplitOf *LocalSlot // slot is a decomposition of SplitOf
+ SplitOffset int64 // .. at this offset.
+}
+
+func (s LocalSlot) String() string {
+ if s.Off == 0 {
+ return fmt.Sprintf("%v[%v]", s.N, s.Type)
+ }
+ return fmt.Sprintf("%v+%d[%v]", s.N, s.Off, s.Type)
+}
+
+type LocPair [2]Location
+
+func (t LocPair) String() string {
+ n0, n1 := "nil", "nil"
+ if t[0] != nil {
+ n0 = t[0].String()
+ }
+ if t[1] != nil {
+ n1 = t[1].String()
+ }
+ return fmt.Sprintf("<%s,%s>", n0, n1)
+}
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
new file mode 100644
index 0000000..5a4bc1d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -0,0 +1,346 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "math"
+)
+
+type indVarFlags uint8
+
+const (
+ indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
+ indVarMaxInc // maximum value is inclusive (default: exclusive)
+)
+
+type indVar struct {
+ ind *Value // induction variable
+ min *Value // minimum value, inclusive/exclusive depends on flags
+ max *Value // maximum value, inclusive/exclusive depends on flags
+ entry *Block // entry block in the loop.
+ flags indVarFlags
+ // Invariant: for all blocks strictly dominated by entry:
+ // min <= ind < max [if flags == 0]
+ // min < ind < max [if flags == indVarMinExc]
+ // min <= ind <= max [if flags == indVarMaxInc]
+ // min < ind <= max [if flags == indVarMinExc|indVarMaxInc]
+}
+
+// parseIndVar checks whether the SSA value passed as argument is a valid induction
+// variable, and, if so, extracts:
+// * the minimum bound
+// * the increment value
+// * the "next" value (SSA value that is Phi'd into the induction variable every loop)
+// Currently, we detect induction variables that match (Phi min nxt),
+// with nxt being (Add inc ind).
+// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
+func parseIndVar(ind *Value) (min, inc, nxt *Value) {
+ if ind.Op != OpPhi {
+ return
+ }
+
+ if n := ind.Args[0]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[1], n
+ } else if n := ind.Args[1]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[0], n
+ } else {
+ // Not a recognized induction variable.
+ return
+ }
+
+ if nxt.Args[0] == ind { // nxt = ind + inc
+ inc = nxt.Args[1]
+ } else if nxt.Args[1] == ind { // nxt = inc + ind
+ inc = nxt.Args[0]
+ } else {
+ panic("unreachable") // one of the cases must be true from the above.
+ }
+
+ return
+}
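+
+// For a loop such as "for i := 0; i < n; i += 2" (illustrative), ind is
+// (Phi (Const64 [0]) nxt) with nxt = (Add64 ind (Const64 [2])), so
+// parseIndVar returns min = (Const64 [0]), inc = (Const64 [2]), and nxt.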
+
+// findIndVar finds induction variables in a function.
+//
+// Look for variables and blocks that satisfy the following
+//
+// loop:
+// ind = (Phi min nxt),
+// if ind < max
+// then goto enter_loop
+// else goto exit_loop
+//
+// enter_loop:
+// do something
+// nxt = inc + ind
+// goto loop
+//
+// exit_loop:
+//
+//
+// TODO: handle 32 bit operations
+func findIndVar(f *Func) []indVar {
+ var iv []indVar
+ sdom := f.Sdom()
+
+ for _, b := range f.Blocks {
+ if b.Kind != BlockIf || len(b.Preds) != 2 {
+ continue
+ }
+
+ var flags indVarFlags
+ var ind, max *Value // induction, and maximum
+
+		// Check that the control is either ind </<= max or max >/>= ind.
+ // TODO: Handle 32-bit comparisons.
+ // TODO: Handle unsigned comparisons?
+ c := b.Controls[0]
+ switch c.Op {
+ case OpLeq64:
+ flags |= indVarMaxInc
+ fallthrough
+ case OpLess64:
+ ind, max = c.Args[0], c.Args[1]
+ default:
+ continue
+ }
+
+ // See if this is really an induction variable
+ less := true
+ min, inc, nxt := parseIndVar(ind)
+ if min == nil {
+ // We failed to parse the induction variable. Before punting, we want to check
+ // whether the control op was written with arguments in non-idiomatic order,
+			// so that what we took to be "max" (the upper bound) is actually the induction
+ // variable itself. This would happen for code like:
+ // for i := 0; len(n) > i; i++
+ min, inc, nxt = parseIndVar(max)
+ if min == nil {
+				// No recognized induction variable on either operand
+ continue
+ }
+
+ // Ok, the arguments were reversed. Swap them, and remember that we're
+ // looking at a ind >/>= loop (so the induction must be decrementing).
+ ind, max = max, ind
+ less = false
+ }
+
+ // Expect the increment to be a nonzero constant.
+ if inc.Op != OpConst64 {
+ continue
+ }
+ step := inc.AuxInt
+ if step == 0 {
+ continue
+ }
+
+ // Increment sign must match comparison direction.
+ // When incrementing, the termination comparison must be ind </<= max.
+ // When decrementing, the termination comparison must be ind >/>= max.
+ // See issue 26116.
+ if step > 0 && !less {
+ continue
+ }
+ if step < 0 && less {
+ continue
+ }
+
+ // If the increment is negative, swap min/max and their flags
+ if step < 0 {
+ min, max = max, min
+ oldf := flags
+ flags = indVarMaxInc
+ if oldf&indVarMaxInc == 0 {
+ flags |= indVarMinExc
+ }
+ step = -step
+ }
+
+ // Up to now we extracted the induction variable (ind),
+ // the increment delta (inc), the temporary sum (nxt),
+		// the minimum value (min) and the maximum value (max).
+ //
+ // We also know that ind has the form (Phi min nxt) where
+		// nxt is (Add inc ind) which means: 1) inc dominates nxt
+ // and 2) there is a loop starting at inc and containing nxt.
+ //
+ // We need to prove that the induction variable is incremented
+ // only when it's smaller than the maximum value.
+		// The two conditions listed below must hold to accept ind
+		// as an induction variable.
+
+ // First condition: loop entry has a single predecessor, which
+ // is the header block. This implies that b.Succs[0] is
+ // reached iff ind < max.
+ if len(b.Succs[0].b.Preds) != 1 {
+ // b.Succs[1] must exit the loop.
+ continue
+ }
+
+ // Second condition: b.Succs[0] dominates nxt so that
+		// nxt is computed when ind < max, meaning nxt <= max.
+ if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
+ // inc+ind can only be reached through the branch that enters the loop.
+ continue
+ }
+
+ // We can only guarantee that the loop runs within limits of induction variable
+ // if (one of)
+ // (1) the increment is ±1
+ // (2) the limits are constants
+ // (3) loop is of the form k0 upto Known_not_negative-k inclusive, step <= k
+ // (4) loop is of the form k0 upto Known_not_negative-k exclusive, step <= k+1
+ // (5) loop is of the form Known_not_negative downto k0, minint+step < k0
+ if step > 1 {
+ ok := false
+ if min.Op == OpConst64 && max.Op == OpConst64 {
+ if max.AuxInt > min.AuxInt && max.AuxInt%step == min.AuxInt%step { // handle overflow
+ ok = true
+ }
+ }
+ // Handle induction variables of these forms.
+ // KNN is known-not-negative.
+ // SIGNED ARITHMETIC ONLY. (see switch on c above)
+ // Possibilities for KNN are len and cap; perhaps we can infer others.
+ // for i := 0; i <= KNN-k ; i += k
+ // for i := 0; i < KNN-(k-1); i += k
+ // Also handle decreasing.
+
+ // "Proof" copied from https://go-review.googlesource.com/c/go/+/104041/10/src/cmd/compile/internal/ssa/loopbce.go#164
+ //
+ // In the case of
+ // // PC is Positive Constant
+ // L := len(A)-PC
+ // for i := 0; i < L; i = i+PC
+ //
+ // we know:
+ //
+ // 0 + PC does not over/underflow.
+ // len(A)-PC does not over/underflow
+ // maximum value for L is MaxInt-PC
+ // i < L <= MaxInt-PC means i + PC < MaxInt hence no overflow.
+
+ // To match in SSA:
+ // if (a) min.Op == OpConst64(k0)
+ // and (b) k0 >= MININT + step
+ // and (c) max.Op == OpSubtract(Op{StringLen,SliceLen,SliceCap}, k)
+ // or (c) max.Op == OpAdd(Op{StringLen,SliceLen,SliceCap}, -k)
+ // or (c) max.Op == Op{StringLen,SliceLen,SliceCap}
+ // and (d) if upto loop, require indVarMaxInc && step <= k or !indVarMaxInc && step-1 <= k
+
+ if min.Op == OpConst64 && min.AuxInt >= step+math.MinInt64 {
+ knn := max
+ k := int64(0)
+ var kArg *Value
+
+ switch max.Op {
+ case OpSub64:
+ knn = max.Args[0]
+ kArg = max.Args[1]
+
+ case OpAdd64:
+ knn = max.Args[0]
+ kArg = max.Args[1]
+ if knn.Op == OpConst64 {
+ knn, kArg = kArg, knn
+ }
+ }
+ switch knn.Op {
+ case OpSliceLen, OpStringLen, OpSliceCap:
+ default:
+ knn = nil
+ }
+
+ if kArg != nil && kArg.Op == OpConst64 {
+ k = kArg.AuxInt
+ if max.Op == OpAdd64 {
+ k = -k
+ }
+ }
+ if k >= 0 && knn != nil {
+ if inc.AuxInt > 0 { // increasing iteration
+ // The concern for the relation between step and k is to ensure that iv never exceeds knn
+ // i.e., iv < knn-(K-1) ==> iv + K <= knn; iv <= knn-K ==> iv +K < knn
+ if step <= k || flags&indVarMaxInc == 0 && step-1 == k {
+ ok = true
+ }
+ } else { // decreasing iteration
+ // Will be decrementing from max towards min; max is knn-k; will only attempt decrement if
+ // knn-k >[=] min; underflow is only a concern if min-step is not smaller than min.
+ // This all assumes signed integer arithmetic
+ // This is already assured by the test above: min.AuxInt >= step+math.MinInt64
+ ok = true
+ }
+ }
+ }
+
+ // TODO: other unrolling idioms
+ // for i := 0; i < KNN - KNN % k ; i += k
+ // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
+ // for i := 0; i < KNN&(-k) ; i += k // k a power of 2
+
+ if !ok {
+ continue
+ }
+ }
+
+ if f.pass.debug >= 1 {
+ printIndVar(b, ind, min, max, step, flags)
+ }
+
+ iv = append(iv, indVar{
+ ind: ind,
+ min: min,
+ max: max,
+ entry: b.Succs[0].b,
+ flags: flags,
+ })
+ b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
+ }
+
+ return iv
+}
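+
+// Concrete instance of the step > 1 case above (illustrative): for
+// "for i := 0; i < len(s)-3; i += 4", min is (Const64 [0]), step is 4, and
+// max is (Sub64 (SliceLen s) (Const64 [3])), so knn is the SliceLen and k is 3.
+// The bound is exclusive (indVarMaxInc unset) and step-1 == k (3 == 3), so the
+// induction variable is accepted: i < len(s)-3 implies i+4 <= len(s), so the
+// increment never pushes i past the slice length.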
+
+func dropAdd64(v *Value) (*Value, int64) {
+ if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 {
+ return v.Args[1], v.Args[0].AuxInt
+ }
+ if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 {
+ return v.Args[0], v.Args[1].AuxInt
+ }
+ return v, 0
+}
+
+func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) {
+ mb1, mb2 := "[", "]"
+ if flags&indVarMinExc != 0 {
+ mb1 = "("
+ }
+ if flags&indVarMaxInc == 0 {
+ mb2 = ")"
+ }
+
+ mlim1, mlim2 := fmt.Sprint(min.AuxInt), fmt.Sprint(max.AuxInt)
+ if !min.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim1 = fmt.Sprint(min)
+ } else {
+ mlim1 = "?"
+ }
+ }
+ if !max.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim2 = fmt.Sprint(max)
+ } else {
+ mlim2 = "?"
+ }
+ }
+ extra := ""
+ if b.Func.pass.debug >= 2 {
+ extra = fmt.Sprintf(" (%s)", i)
+ }
+ b.Func.Warnl(b.Pos, "Induction variable: limits %v%v,%v%v, increment %d%s", mb1, mlim1, mlim2, mb2, inc, extra)
+}
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
new file mode 100644
index 0000000..9c73bcf
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -0,0 +1,499 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// an edgeMem records a backedge, together with the memory
+// phi functions at the target of the backedge that must
+// be updated when a rescheduling check replaces the backedge.
+type edgeMem struct {
+ e Edge
+ m *Value // phi for memory at dest of e
+}
+
+// a rewriteTarget is a value-argindex pair indicating
+// where a rewrite is applied. Note that this is for values,
+// not for block controls, because block controls are not targets
+// for the rewrites performed in inserting rescheduling checks.
+type rewriteTarget struct {
+ v *Value
+ i int
+}
+
+type rewrite struct {
+ before, after *Value // before is the expected value before rewrite, after is the new value installed.
+ rewrites []rewriteTarget // all the targets for this rewrite.
+}
+
+func (r *rewrite) String() string {
+ s := "\n\tbefore=" + r.before.String() + ", after=" + r.after.String()
+ for _, rw := range r.rewrites {
+ s += ", (i=" + fmt.Sprint(rw.i) + ", v=" + rw.v.LongString() + ")"
+ }
+ s += "\n"
+ return s
+}
+
+// insertLoopReschedChecks inserts rescheduling checks on loop backedges.
+func insertLoopReschedChecks(f *Func) {
+ // TODO: when split information is recorded in export data, insert checks only on backedges that can be reached on a split-call-free path.
+
+ // Loop reschedule checks compare the stack pointer with
+ // the per-g stack bound. If the pointer appears invalid,
+ // that means a reschedule check is needed.
+ //
+ // Steps:
+ // 1. locate backedges.
+ // 2. Record memory definitions at block end so that
+ // the SSA graph for mem can be properly modified.
+	//    3. Ensure that phi functions that will be needed for mem
+ // are present in the graph, initially with trivial inputs.
+ // 4. Record all to-be-modified uses of mem;
+	//       apply modifications (split into two steps to simplify and
+	//       avoid nagging order-dependencies).
+ // 5. Rewrite backedges to include reschedule check,
+ // and modify destination phi function appropriately with new
+ // definitions for mem.
+
+ if f.NoSplit { // nosplit functions don't reschedule.
+ return
+ }
+
+ backedges := backedges(f)
+ if len(backedges) == 0 { // no backedges means no rescheduling checks.
+ return
+ }
+
+ lastMems := findLastMems(f)
+
+ idom := f.Idom()
+ po := f.postorder()
+ // The ordering in the dominator tree matters; it's important that
+ // the walk of the dominator tree also be a preorder (i.e., a node is
+ // visited only after all its non-backedge predecessors have been visited).
+ sdom := newSparseOrderedTree(f, idom, po)
+
+ if f.pass.debug > 1 {
+ fmt.Printf("before %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+
+ tofixBackedges := []edgeMem{}
+
+ for _, e := range backedges { // TODO: could filter here by calls in loops, if declared and inferred nosplit are recorded in export data.
+ tofixBackedges = append(tofixBackedges, edgeMem{e, nil})
+ }
+
+ // It's possible that there is no memory state (no global/pointer loads/stores or calls)
+ if lastMems[f.Entry.ID] == nil {
+ lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem)
+ }
+
+ memDefsAtBlockEnds := make([]*Value, f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block.
+
+ // Propagate last mem definitions forward through successor blocks.
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ mem := lastMems[b.ID]
+ for j := 0; mem == nil; j++ { // if there's no def, then there's no phi, so the visible mem is identical in all predecessors.
+ // loop because there might be backedges that haven't been visited yet.
+ mem = memDefsAtBlockEnds[b.Preds[j].b.ID]
+ }
+ memDefsAtBlockEnds[b.ID] = mem
+ if f.pass.debug > 2 {
+ fmt.Printf("memDefsAtBlockEnds[%s] = %s\n", b, mem)
+ }
+ }
+
+ // Maps from block to newly-inserted phi function in block.
+ newmemphis := make(map[*Block]rewrite)
+
+ // Insert phi functions as necessary for future changes to flow graph.
+ for i, emc := range tofixBackedges {
+ e := emc.e
+ h := e.b
+
+ // find the phi function for the memory input at "h", if there is one.
+ var headerMemPhi *Value // look for header mem phi
+
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ headerMemPhi = v
+ }
+ }
+
+ if headerMemPhi == nil {
+ // if the header has no mem phi yet, make a trivial one seeded with the mem def from the dominator
+ mem0 := memDefsAtBlockEnds[idom[h.ID].ID]
+ headerMemPhi = newPhiFor(h, mem0)
+ newmemphis[h] = rewrite{before: mem0, after: headerMemPhi}
+ addDFphis(mem0, h, h, f, memDefsAtBlockEnds, newmemphis, sdom)
+
+ }
+ tofixBackedges[i].m = headerMemPhi
+
+ }
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("before b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // dfPhiTargets notes inputs to phis in dominance frontiers that should not
+ // be rewritten as part of the dominated children of some outer rewrite.
+ dfPhiTargets := make(map[rewriteTarget]bool)
+
+ rewriteNewPhis(f.Entry, f.Entry, f, memDefsAtBlockEnds, newmemphis, dfPhiTargets, sdom)
+
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("after b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // Apply collected rewrites.
+ for _, r := range newmemphis {
+ for _, rw := range r.rewrites {
+ rw.v.SetArg(rw.i, r.after)
+ }
+ }
+
+ // Rewrite backedges to include reschedule checks.
+ for _, emc := range tofixBackedges {
+ e := emc.e
+ headerMemPhi := emc.m
+ h := e.b
+ i := e.i
+ p := h.Preds[i]
+ bb := p.b
+ mem0 := headerMemPhi.Args[i]
+ // bb e->p h,
+ // Because we're going to insert a rare-call, make sure the
+ // looping edge still looks likely.
+ likely := BranchLikely
+ if p.i != 0 {
+ likely = BranchUnlikely
+ }
+ if bb.Kind != BlockPlain { // backedges can be unconditional. e.g., if x { something; continue }
+ bb.Likely = likely
+ }
+
+ // rewrite edge to include reschedule check
+ // existing edges:
+ //
+ // bb.Succs[p.i] == Edge{h, i}
+ // h.Preds[i] == p == Edge{bb,p.i}
+ //
+ // new block(s):
+ // test:
+ // if sp < g.limit { goto sched }
+ // goto join
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto join
+ // join:
+ // mem2 := phi(mem0, mem1)
+ // goto h
+ //
+ // and correct arg i of headerMemPhi
+ //
+ // EXCEPT: join block containing only phi functions is bad
+ // for the register allocator. Therefore, there is no
+ // join, and branches targeting join must instead target
+ // the header, and the other phi functions within header are
+ // adjusted for the additional input.
+
+ test := f.NewBlock(BlockIf)
+ sched := f.NewBlock(BlockPlain)
+
+ test.Pos = bb.Pos
+ sched.Pos = bb.Pos
+
+ // if sp < g.limit { goto sched }
+ // goto header
+
+ cfgtypes := &f.Config.Types
+ pt := cfgtypes.Uintptr
+ g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
+ sp := test.NewValue0(bb.Pos, OpSP, pt)
+ cmpOp := OpLess64U
+ if pt.Size() == 4 {
+ cmpOp = OpLess32U
+ }
+ limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
+ lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
+ cmp := test.NewValue2(bb.Pos, cmpOp, cfgtypes.Bool, sp, lim)
+ test.SetControl(cmp)
+
+ // if true, goto sched
+ test.AddEdgeTo(sched)
+
+ // if false, rewrite edge to header.
+ // do NOT remove+add, because that will perturb all the other phi functions
+ // as well as messing up other edges to the header.
+ test.Succs = append(test.Succs, Edge{h, i})
+ h.Preds[i] = Edge{test, 1}
+ headerMemPhi.SetArg(i, mem0)
+
+ test.Likely = BranchUnlikely
+
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto header
+ resched := f.fe.Syslook("goschedguarded")
+ mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched, nil, nil), mem0)
+ sched.AddEdgeTo(h)
+ headerMemPhi.AddArg(mem1)
+
+ bb.Succs[p.i] = Edge{test, 0}
+ test.Preds = append(test.Preds, Edge{bb, p.i})
+
+ // Must correct all the other phi functions in the header for new incoming edge.
+ // Except for mem phis, it will be the same value seen on the original
+ // backedge at index i.
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v != headerMemPhi {
+ v.AddArg(v.Args[i])
+ }
+ }
+ }
+
+ f.invalidateCFG()
+
+ if f.pass.debug > 1 {
+ sdom = newSparseTree(f, f.Idom())
+ fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+}
+
+// newPhiFor inserts a new Phi function into b,
+// with all inputs set to v.
+func newPhiFor(b *Block, v *Value) *Value {
+ phiV := b.NewValue0(b.Pos, OpPhi, v.Type)
+
+ for range b.Preds {
+ phiV.AddArg(v)
+ }
+ return phiV
+}
+
+// rewriteNewPhis updates newphis[h] to record all places where the new phi function inserted
+// in block h will replace a previous definition. Block b is the block currently being processed;
+// if b has its own phi definition then it takes the place of h.
+// defsForUses provides information about other definitions of the variable that are present
+// (and if nil, indicates that the variable is no longer live)
+// sdom must yield a preorder of the flow graph if recursively walked, root-to-children.
+// The result of newSparseOrderedTree with order supplied by a dfs-postorder satisfies this
+// requirement.
+func rewriteNewPhis(h, b *Block, f *Func, defsForUses []*Value, newphis map[*Block]rewrite, dfPhiTargets map[rewriteTarget]bool, sdom SparseTree) {
+ // If b is a block with a new phi, then a new rewrite applies below it in the dominator tree.
+ if _, ok := newphis[b]; ok {
+ h = b
+ }
+ change := newphis[h]
+ x := change.before
+ y := change.after
+
+ // Apply rewrites to this block
+ if x != nil { // don't waste time on the common case of no definition.
+ p := &change.rewrites
+ for _, v := range b.Values {
+ if v == y { // don't rewrite self -- phi inputs are handled below.
+ continue
+ }
+ for i, w := range v.Args {
+ if w != x {
+ continue
+ }
+ tgt := rewriteTarget{v, i}
+
+ // It's possible dominated control flow will rewrite this instead.
+ // Visiting in preorder (a property of how sdom was constructed)
+ // ensures that these are seen in the proper order.
+ if dfPhiTargets[tgt] {
+ continue
+ }
+ *p = append(*p, tgt)
+ if f.pass.debug > 1 {
+ fmt.Printf("added block target for h=%v, b=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, x, y, v, i)
+ }
+ }
+ }
+
+ // Rewrite appropriate inputs of phis reached in successors
+ // in dominance frontier, self, and dominated.
+ // If the variable def reaching uses in b is itself defined in b, then the new phi function
+ // does not reach the successors of b. (This assumes a bit about the structure of the
+ // phi use-def graph, but it's true for memory.)
+ if dfu := defsForUses[b.ID]; dfu != nil && dfu.Block != b {
+ for _, e := range b.Succs {
+ s := e.b
+
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ tgt := rewriteTarget{v, e.i}
+ *p = append(*p, tgt)
+ dfPhiTargets[tgt] = true
+ if f.pass.debug > 1 {
+ fmt.Printf("added phi target for h=%v, b=%v, s=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, s, x, y, v.LongString(), e.i)
+ }
+ break
+ }
+ }
+ }
+ }
+ newphis[h] = change
+ }
+
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ rewriteNewPhis(h, c, f, defsForUses, newphis, dfPhiTargets, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// addDFphis creates new trivial phis that are necessary to correctly reflect (within SSA)
+// a new definition for variable "x" inserted at h (usually but not necessarily a phi).
+// These new phis can only occur at the dominance frontier of h; block s is in the dominance
+// frontier of h if h does not strictly dominate s and if s is a successor of a block b where
+// either b = h or h strictly dominates b.
+// These newly created phis are themselves new definitions that may require addition of their
+// own trivial phi functions in their own dominance frontier, and this is handled recursively.
+func addDFphis(x *Value, h, b *Block, f *Func, defForUses []*Value, newphis map[*Block]rewrite, sdom SparseTree) {
+ oldv := defForUses[b.ID]
+ if oldv != x { // either a new definition replacing x, or nil if it is proven that there are no uses reachable from b
+ return
+ }
+ idom := f.Idom()
+outer:
+ for _, e := range b.Succs {
+ s := e.b
+ // check phi functions in the dominance frontier
+ if sdom.isAncestor(h, s) {
+ continue // h dominates s, successor of b, therefore s is not in the frontier.
+ }
+ if _, ok := newphis[s]; ok {
+ continue // successor s of b already has a new phi function, so there is no need to add another.
+ }
+ if x != nil {
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ continue outer // successor s of b has an old phi function, so there is no need to add another.
+ }
+ }
+ }
+
+ old := defForUses[idom[s.ID].ID] // new phi function is correct-but-redundant, combining value "old" on all inputs.
+ headerPhi := newPhiFor(s, old)
+ // the new phi will replace "old" in block s and all blocks dominated by s.
+ newphis[s] = rewrite{before: old, after: headerPhi} // record new phi, to have inputs labeled "old" rewritten to "headerPhi"
+ addDFphis(old, s, s, f, defForUses, newphis, sdom) // the new definition may also create new phi functions.
+ }
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ addDFphis(x, h, c, f, defForUses, newphis, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// findLastMems maps block IDs to the last memory-output op in each block, if any.
+func findLastMems(f *Func) []*Value {
+
+ var stores []*Value
+ lastMems := make([]*Value, f.NumBlocks())
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // storeUse contains stores which are used by a subsequent store.
+ storeUse.clear()
+ stores = stores[:0]
+ var memPhi *Value
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ memPhi = v
+ }
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ lastMems[b.ID] = memPhi
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+ lastMems[b.ID] = last
+ }
+ return lastMems
+}
+
+// markKind records the state of a block during the backedge-finding DFS.
+type markKind uint8
+
+const (
+ notFound markKind = iota // block has not been discovered yet
+ notExplored // discovered and in queue, outedges not processed yet
+ explored // discovered and in queue, outedges processed
+ done // all done, in output ordering
+)
+
+type backedgesState struct {
+ b *Block
+ i int
+}
+
+// backedges returns a slice of successor edges that are back
+// edges. For reducible loops, edge.b is the header.
+func backedges(f *Func) []Edge {
+ edges := []Edge{}
+ mark := make([]markKind, f.NumBlocks())
+ stack := []backedgesState{}
+
+ mark[f.Entry.ID] = notExplored
+ stack = append(stack, backedgesState{f.Entry, 0})
+
+ for len(stack) > 0 {
+ l := len(stack)
+ x := stack[l-1]
+ if x.i < len(x.b.Succs) {
+ e := x.b.Succs[x.i]
+ stack[l-1].i++
+ s := e.b
+ if mark[s.ID] == notFound {
+ mark[s.ID] = notExplored
+ stack = append(stack, backedgesState{s, 0})
+ } else if mark[s.ID] == notExplored {
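+ // s is still on the DFS stack, so this edge re-enters
+ // an ancestor: record it as a backedge.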
+ edges = append(edges, e)
+ }
+ } else {
+ mark[x.b.ID] = done
+ stack = stack[0 : l-1]
+ }
+ }
+ return edges
+}
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
new file mode 100644
index 0000000..2e5e421
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -0,0 +1,106 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// loopRotate converts loops with a check-loop-condition-at-beginning
+// to loops with a check-loop-condition-at-end.
+// This helps loops avoid unnecessary jumps.
+//
+// loop:
+// CMPQ ...
+// JGE exit
+// ...
+// JMP loop
+// exit:
+//
+// JMP entry
+// loop:
+// ...
+// entry:
+// CMPQ ...
+// JLT loop
+func loopRotate(f *Func) {
+ loopnest := f.loopnest()
+ if loopnest.hasIrreducible {
+ return
+ }
+ if len(loopnest.loops) == 0 {
+ return
+ }
+
+ idToIdx := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ idToIdx[b.ID] = i
+ }
+
+ // Set of blocks we're moving, by ID.
+ move := map[ID]struct{}{}
+
+ // Map from block ID to the moving blocks that should
+ // come right after it.
+ after := map[ID][]*Block{}
+
+ // Check each loop header and decide if we want to move it.
+ for _, loop := range loopnest.loops {
+ b := loop.header
+ var p *Block // b's in-loop predecessor
+ for _, e := range b.Preds {
+ if e.b.Kind != BlockPlain {
+ continue
+ }
+ if loopnest.b2l[e.b.ID] != loop {
+ continue
+ }
+ p = e.b
+ }
+ if p == nil || p == b {
+ continue
+ }
+ after[p.ID] = []*Block{b}
+ for {
+ nextIdx := idToIdx[b.ID] + 1
+ if nextIdx >= len(f.Blocks) { // reached end of function (maybe impossible?)
+ break
+ }
+ nextb := f.Blocks[nextIdx]
+ if nextb == p { // original loop predecessor is next
+ break
+ }
+ if loopnest.b2l[nextb.ID] != loop { // about to leave loop
+ break
+ }
+ after[p.ID] = append(after[p.ID], nextb)
+ b = nextb
+ }
+
+ // Mark the blocks recorded in after[p.ID]; they will be placed after p below.
+ for _, b := range after[p.ID] {
+ move[b.ID] = struct{}{}
+ }
+ }
+
+ // Move blocks to their destinations in a single pass.
+ // We rely here on the fact that loop headers must come
+ // before the rest of the loop. And that relies on the
+ // fact that we only identify reducible loops.
+ j := 0
+ for i, b := range f.Blocks {
+ if _, ok := move[b.ID]; ok {
+ continue
+ }
+ f.Blocks[j] = b
+ j++
+ for _, a := range after[b.ID] {
+ if j > i {
+ f.Fatalf("head before tail in loop %s", b)
+ }
+ f.Blocks[j] = a
+ j++
+ }
+ }
+ if j != len(f.Blocks) {
+ f.Fatalf("bad reordering in looprotate")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go
new file mode 100644
index 0000000..f332b2e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lower.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// convert to machine-dependent ops
+func lower(f *Func) {
+ // repeat rewrites until we find no more rewrites
+ applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues)
+}
+
+// checkLower checks for unlowered opcodes and fails if we find one.
+func checkLower(f *Func) {
+ // Needs to be a separate phase because it must run after both
+ // lowering and a subsequent dead code elimination (because lowering
+ // rules may leave dead generic ops behind).
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !opcodeTable[v.Op].generic {
+ continue // lowered
+ }
+ switch v.Op {
+ case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpConvert, OpInlMark:
+ continue // ok not to lower
+ case OpGetG:
+ if f.Config.hasGReg {
+ // has hardware g register, regalloc takes care of it
+ continue // ok not to lower
+ }
+ }
+ s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
+ for _, a := range v.Args {
+ s += " " + a.Type.SimpleString()
+ }
+ f.Fatalf("%s", s)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
new file mode 100644
index 0000000..93f8801
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -0,0 +1,424 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "math/bits"
+)
+
+// So you want to compute x / c for some constant c?
+// Machine division instructions are slow, so we try to
+// compute this division with a multiplication + a few
+// other cheap instructions instead.
+// (We assume here that c != 0, +/- 1, or +/- 2^i. Those
+// cases are easy to handle in different ways).
+
+// Technique from https://gmplib.org/~tege/divcnst-pldi94.pdf
+
+// First consider unsigned division.
+// Our strategy is to precompute 1/c then do
+// ⎣x / c⎦ = ⎣x * (1/c)⎦.
+// 1/c is less than 1, so we can't compute it directly in
+// integer arithmetic. Let's instead compute 2^e/c
+// for a value of e TBD (^ = exponentiation). Then
+// ⎣x / c⎦ = ⎣x * (2^e/c) / 2^e⎦.
+// Dividing by 2^e is easy. 2^e/c isn't an integer, unfortunately.
+// So we must approximate it. Let's call its approximation m.
+// We'll then compute
+// ⎣x * m / 2^e⎦
+// which we want to be equal to ⎣x / c⎦ for 0 <= x <= 2^n-1
+// where n is the word size.
+// Setting x = c gives us c * m >= 2^e.
+// We'll choose m = ⎡2^e/c⎤ to satisfy that equation.
+// What remains is to choose e.
+// Let m = 2^e/c + delta, 0 <= delta < 1
+// ⎣x * (2^e/c + delta) / 2^e⎦
+// ⎣x / c + x * delta / 2^e⎦
+// We must have x * delta / 2^e < 1/c so that this
+// additional term never rounds differently than ⎣x / c⎦ does.
+// Rearranging,
+// 2^e > x * delta * c
+// x can be at most 2^n-1 and delta can be at most 1.
+// So it is sufficient to have 2^e >= 2^n*c.
+// So we'll choose e = n + s, with s = ⎡log2(c)⎤.
+//
+// An additional complication arises because m has n+1 bits in it.
+// Hardware restricts us to n bit by n bit multiplies.
+// We divide into 3 cases:
+//
+// Case 1: m is even.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^(n+s-1)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^n / 2^(s-1)⎦
+// ⎣x / c⎦ = ⎣⎣x * (m/2) / 2^n⎦ / 2^(s-1)⎦
+// multiply + shift
+//
+// Case 2: c is even.
+// ⎣x / c⎦ = ⎣(x/2) / (c/2)⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ / (c/2)⎦
+// This is just the original problem, with x' = ⎣x/2⎦, c' = c/2, n' = n-1.
+// s' = s-1
+// m' = ⎡2^(n'+s')/c'⎤
+// = ⎡2^(n+s-1)/c⎤
+// = ⎡m/2⎤
+// ⎣x / c⎦ = ⎣x' * m' / 2^(n'+s')⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ * ⎡m/2⎤ / 2^(n+s-2)⎦
+// ⎣x / c⎦ = ⎣⎣⎣x/2⎦ * ⎡m/2⎤ / 2^n⎦ / 2^(s-2)⎦
+// shift + multiply + shift
+//
+// Case 3: everything else
+// let k = m - 2^n. k fits in n bits.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (2^n + k) / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣(x + x * k / 2^n) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣⎣(x + ⎣x * k / 2^n⎦) / 2⎦ / 2^(s-1)⎦
+// multiply + avg + shift
+//
+// These can be implemented in hardware using:
+// ⎣a * b / 2^n⎦ - aka high n bits of an n-bit by n-bit multiply.
+// ⎣(a+b) / 2⎦ - aka "average" of two n-bit numbers.
+// (Not just a regular add & shift because the intermediate result
+// a+b has n+1 bits in it. Nevertheless, can be done
+// in 2 instructions on x86.)
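+//
+// Worked example (illustrative): n = 32, c = 7.
+// s = ⎡log2(7)⎤ = 3, e = n+s = 35, m = ⎡2^35/7⎤ = 4908534053.
+// Both m and c are odd, so case 3 applies with k = m - 2^32 = 613566757.
+// For x = 2^32-1 = 4294967295:
+//   ⎣x * k / 2^32⎦ = 613566756
+//   ⎣(x + 613566756) / 2⎦ = 2454267025
+//   2454267025 >> (s-1) = 613566756 = ⎣4294967295 / 7⎦
+// For c = 5, m = ⎡2^35/5⎤ = 6871947674 is even, so case 1 applies:
+//   ⎣x / 5⎦ = ⎣⎣x * (m/2) / 2^32⎦ / 2^(s-1)⎦ with m/2 = 3435973837.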
+
+// umagicOK reports whether we should strength reduce an n-bit divide by c.
+func umagicOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+// umagicOKn reports whether we should strength reduce an unsigned n-bit divide by c.
+// We can strength reduce when c != 0 and c is not a power of two.
+func umagicOK8(c int8) bool { return c&(c-1) != 0 }
+func umagicOK16(c int16) bool { return c&(c-1) != 0 }
+func umagicOK32(c int32) bool { return c&(c-1) != 0 }
+func umagicOK64(c int64) bool { return c&(c-1) != 0 }
+
+type umagicData struct {
+ s int64 // ⎡log2(c)⎤
+ m uint64 // ⎡2^(n+s)/c⎤ - 2^n
+}
+
+// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
+// The return values satisfy for all 0 <= x < 2^n
+// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
+func umagic(n uint, c int64) umagicData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ C := new(big.Int).SetUint64(d)
+ s := C.BitLen()
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 1 {
+ panic("n+1st bit isn't set")
+ }
+ M.SetBit(M, int(n), 0)
+ m := M.Uint64()
+ return umagicData{s: int64(s), m: m}
+}
+
+func umagic8(c int8) umagicData { return umagic(8, int64(c)) }
+func umagic16(c int16) umagicData { return umagic(16, int64(c)) }
+func umagic32(c int32) umagicData { return umagic(32, int64(c)) }
+func umagic64(c int64) umagicData { return umagic(64, c) }
+
+// For signed division, we use a similar strategy.
+// First, we enforce a positive c.
+// x / c = -(x / (-c))
+// This will require an additional Neg op for c<0.
+//
+// If x is positive we're in a very similar state
+// to the unsigned case above. We define:
+// s = ⎡log2(c)⎤-1
+// m = ⎡2^(n+s)/c⎤
+// Then
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// If x is negative we have
+// ⎡x / c⎤ = ⎣x * m / 2^(n+s)⎦ + 1
+// (TODO: derivation?)
+//
+// The multiply is a bit odd, as it is a signed n-bit value
+// times an unsigned n-bit value. For n smaller than the
+// word size, we can extend x and m appropriately and use the
+// signed multiply instruction. For n == word size,
+// we must use the signed multiply high and correct
+// the result by adding x*2^n.
+//
+// Adding 1 if x<0 is done by subtracting x>>(n-1).
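+//
+// Worked example (illustrative): n = 32, c = 7.
+// s = ⎡log2(7)⎤-1 = 2, m = ⎡2^34/7⎤ = 2454267027.
+// x = -6: ⎣-6 * m / 2^34⎦ = -1; adding 1 because x < 0 gives 0 = trunc(-6/7).
+// x = -7: ⎣-7 * m / 2^34⎦ = -2; adding 1 gives -1 = trunc(-7/7).
+// x = 13: ⎣13 * m / 2^34⎦ = 1 = trunc(13/7).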
+
+func smagicOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+// smagicOKn reports whether we should strength reduce a signed n-bit divide by c.
+func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) }
+func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) }
+func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) }
+func smagicOK64(c int64) bool { return smagicOK(64, c) }
+
+type smagicData struct {
+ s int64 // ⎡log2(c)⎤-1
+ m uint64 // ⎡2^(n+s)/c⎤
+}
+
+// smagic computes the constants needed to strength reduce signed n-bit divides by the constant c.
+// Must have c>0.
+// The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
+// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
+func smagic(n uint, c int64) smagicData {
+ C := new(big.Int).SetInt64(c)
+ s := C.BitLen() - 1
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 0 {
+ panic("n+1st bit is set")
+ }
+ if M.Bit(int(n-1)) == 0 {
+ panic("nth bit is not set")
+ }
+ m := M.Uint64()
+ return smagicData{s: int64(s), m: m}
+}
+
+func smagic8(c int8) smagicData { return smagic(8, int64(c)) }
+func smagic16(c int16) smagicData { return smagic(16, int64(c)) }
+func smagic32(c int32) smagicData { return smagic(32, int64(c)) }
+func smagic64(c int64) smagicData { return smagic(64, c) }
+
+// Divisibility x%c == 0 can be checked more efficiently than directly computing
+// the modulus x%c and comparing against 0.
+//
+// The same "Division by invariant integers using multiplication" paper
+// by Granlund and Montgomery referenced above briefly mentions this method
+// and it is further elaborated in "Hacker's Delight" by Warren Section 10-17
+//
+// The first thing to note is that for odd integers, exact division can be computed
+// by using the modular inverse with respect to the word size 2^n.
+//
+// Given c, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from 0, c, 2c, 3c, ... up to ⎣(2^n - 1)/c⎦ * c, the maximum multiple.
+// Thus, x*m mod 2^n is 0, 1, 2, 3, ... ⎣(2^n - 1)/c⎦
+// i.e. the quotient takes all values from zero up to max = ⎣(2^n - 1)/c⎦
+//
+// If x is not divisible by c, then x*m mod 2^n must take some larger value than max.
+//
+// This gives x*m mod 2^n <= ⎣(2^n - 1)/c⎦ as a test for divisibility
+// involving one multiplication and compare.
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+// For d0, the test is the same as above. Let m be such that m*d0 mod 2^n == 1
+// Then x*m mod 2^n <= ⎣(2^n - 1)/d0⎦ is the first test.
+// The test for divisibility by 2^k is a check for k trailing zeroes.
+// Note that since d0 is odd, m is odd and thus x*m will have the same number of
+// trailing zeroes as x. So the two tests are,
+//
+// x*m mod 2^n <= ⎣(2^n - 1)/d0⎦
+// and x*m ends in k zero bits
+//
+// These can be combined into a single comparison by the following
+// (theorem ZRU in Hacker's Delight) for unsigned integers.
+//
+//   x <= a and x ends in k zero bits if and only if RotRight(x, k) <= ⎣a/(2^k)⎦
+// where RotRight(x, k) is right rotation of x by k bits.
+//
+// To prove the first direction, x <= a -> ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+// But since x ends in k zeroes all the rotated bits would be zero too.
+// So RotRight(x, k) == ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+//
+// If x does not end in k zero bits, then RotRight(x, k)
+// has some non-zero bits in the k highest bits.
+// ⎣x/(2^k)⎦ has all zeroes in the k highest bits,
+// so RotRight(x, k) > ⎣x/(2^k)⎦
+//
+// Finally, if x > a and has k trailing zero bits, then RotRight(x, k) == ⎣x/(2^k)⎦
+// and ⎣x/(2^k)⎦ must be greater than ⎣a/(2^k)⎦, that is the top n-k bits of x must
+// be greater than the top n-k bits of a because the rest of x bits are zero.
+//
+// So the two conditions above can be replaced with the single test
+//
+// RotRight(x*m mod 2^n, k) <= ⎣(2^n - 1)/c⎦
+//
+// Where d0*2^k was replaced by c on the right hand side.
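+//
+// Worked example (illustrative): n = 8, c = 6 = 3 * 2^1, so d0 = 3, k = 1,
+// m = 171 (3*171 = 513 == 1 mod 2^8), and ⎣(2^8 - 1)/6⎦ = 42.
+// x = 18: 18*171 mod 256 = 6;  RotRight(6, 1) = 3,   and 3 <= 42, so 6 divides 18.
+// x = 21: 21*171 mod 256 = 7;  RotRight(7, 1) = 131, and 131 > 42, so 6 does not divide 21.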
+
+// udivisibleOK reports whether we should strength reduce an unsigned n-bit divisibility check by c.
+func udivisibleOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+func udivisibleOK8(c int8) bool { return udivisibleOK(8, int64(c)) }
+func udivisibleOK16(c int16) bool { return udivisibleOK(16, int64(c)) }
+func udivisibleOK32(c int32) bool { return udivisibleOK(32, int64(c)) }
+func udivisibleOK64(c int64) bool { return udivisibleOK(64, c) }
+
+type udivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+ max uint64 // ⎣(2^n - 1)/c⎦ max value for the divisibility check
+}
+
+func udivisible(n uint, c int64) udivisibleData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
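+ // (If m*d0 == 1 + e mod 2^64 with e divisible by 2^t, then the next
+ // iterate satisfies m*(2-m*d0)*d0 == (1+e)*(1-e) == 1 - e*e, and e*e is
+ // divisible by 2^(2t), so the number of correct low bits doubles.)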
+ m := d0 // initial guess correct to 3 bits (d0*d0 mod 8 == 1)
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ max := mask / d
+
+ return udivisibleData{
+ k: int64(k),
+ m: m,
+ max: max,
+ }
+}
+
+func udivisible8(c int8) udivisibleData { return udivisible(8, int64(c)) }
+func udivisible16(c int16) udivisibleData { return udivisible(16, int64(c)) }
+func udivisible32(c int32) udivisibleData { return udivisible(32, int64(c)) }
+func udivisible64(c int64) udivisibleData { return udivisible(64, c) }
+
+// For signed integers, a similar method follows.
+//
+// Given c > 1 and odd, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from ⎡-2^(n-1)/c⎤ * c, ... -c, 0, c, ... ⎣(2^(n-1) - 1)/c⎦ * c
+// Thus, x*m mod 2^n is ⎡-2^(n-1)/c⎤, ... -2, -1, 0, 1, 2, ... ⎣(2^(n-1) - 1)/c⎦
+//
+// So, x is a multiple of c if and only if:
+// ⎡-2^(n-1)/c⎤ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// Since c > 1 and odd, this can be simplified by
+// ⎡-2^(n-1)/c⎤ == ⎡(-2^(n-1) + 1)/c⎤ == -⎣(2^(n-1) - 1)/c⎦
+//
+// -⎣(2^(n-1) - 1)/c⎦ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n. Then c divides x if:
+//
+// -⎣(2^(n-1) - 1)/d0⎦ <= q <= ⎣(2^(n-1) - 1)/d0⎦ and q ends in at least k 0-bits
+//
+// To transform this to a single comparison, we use the following theorem (ZRS in Hacker's Delight).
+//
+// For a >= 0 the following conditions are equivalent:
+// 1) -a <= x <= a and x ends in at least k 0-bits
+// 2) RotRight(x+a', k) <= ⎣2a'/2^k⎦
+//
+// Where a' = a & -2^k (a with its right k bits set to zero)
+//
+// To see that 1 & 2 are equivalent, note that -a <= x <= a is equivalent to
+// -a' <= x <= a' if and only if x ends in at least k 0-bits. Adding -a' to each side gives,
+// 0 <= x + a' <= 2a' and x + a' ends in at least k 0-bits if and only if x does since a' has
+// k 0-bits by definition. We can use theorem ZRU above with x -> x + a' and a -> 2a' giving 1) == 2).
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n.
+// Let a' = ⎣(2^(n-1) - 1)/d0⎦ & -2^k
+//
+// Then the divisibility test is:
+//
+// RotRight(q+a', k) <= ⎣2a'/2^k⎦
+//
+// Note that the calculation is performed using unsigned integers.
+// Since a' can have n-1 bits, 2a' may have n bits and there is no risk of overflow.
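+//
+// Worked example (illustrative): n = 8, c = 6, so d0 = 3, k = 1, m = 171,
+// a' = ⎣(2^7 - 1)/3⎦ & -2 = 42, and ⎣2*42/2^1⎦ = 42.
+// x = -12 (as uint8: 244): q = 244*171 mod 256 = 252;
+//   RotRight((252+42) mod 256, 1) = RotRight(38, 1) = 19, and 19 <= 42, so 6 divides -12.
+// x = -13 (as uint8: 243): q = 243*171 mod 256 = 81;
+//   RotRight((81+42) mod 256, 1) = RotRight(123, 1) = 189, and 189 > 42, so 6 does not divide -13.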
+
+// sdivisibleOK reports whether we should strength reduce a signed n-bit divisibility check by c.
+func sdivisibleOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+func sdivisibleOK8(c int8) bool { return sdivisibleOK(8, int64(c)) }
+func sdivisibleOK16(c int16) bool { return sdivisibleOK(16, int64(c)) }
+func sdivisibleOK32(c int32) bool { return sdivisibleOK(32, int64(c)) }
+func sdivisibleOK64(c int64) bool { return sdivisibleOK(64, c) }
+
+type sdivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+ a uint64 // ⎣(2^(n-1) - 1)/ (c>>k)⎦ & -(1<<k) additive constant
+ max uint64 // ⎣(2 a) / (1<<k)⎦ max value for the divisibility check
+}
+
+func sdivisible(n uint, c int64) sdivisibleData {
+ d := uint64(c)
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
+ m := d0 // initial guess correct to 3 bits (d0*d0 mod 8 == 1)
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ a := ((mask >> 1) / d0) & -(1 << uint(k))
+ max := (2 * a) >> uint(k)
+
+ return sdivisibleData{
+ k: int64(k),
+ m: m,
+ a: a,
+ max: max,
+ }
+}
+
+func sdivisible8(c int8) sdivisibleData { return sdivisible(8, int64(c)) }
+func sdivisible16(c int16) sdivisibleData { return sdivisible(16, int64(c)) }
+func sdivisible32(c int32) sdivisibleData { return sdivisible(32, int64(c)) }
+func sdivisible64(c int64) sdivisibleData { return sdivisible(64, c) }
diff --git a/src/cmd/compile/internal/ssa/magic_test.go b/src/cmd/compile/internal/ssa/magic_test.go
new file mode 100644
index 0000000..7c6009d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic_test.go
@@ -0,0 +1,410 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "testing"
+)
+
+func TestMagicExhaustive8(t *testing.T) {
+ testMagicExhaustive(t, 8)
+}
+func TestMagicExhaustive8U(t *testing.T) {
+ testMagicExhaustiveU(t, 8)
+}
+func TestMagicExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustive(t, 16)
+}
+func TestMagicExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustiveU(t, 16)
+}
+
+// exhaustive test of magic for n bits
+func testMagicExhaustive(t *testing.T, n uint) {
+ min := -int64(1) << (n - 1)
+ max := int64(1) << (n - 1)
+ for c := int64(1); c < max; c++ {
+ if !smagicOK(n, int64(c)) {
+ continue
+ }
+ m := int64(smagic(n, c).m)
+ s := smagic(n, c).s
+ for i := min; i < max; i++ {
+ want := i / c
+ got := (i * m) >> (n + uint(s))
+ if i < 0 {
+ got++
+ }
+ if want != got {
+ t.Errorf("signed magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+func testMagicExhaustiveU(t *testing.T, n uint) {
+ max := uint64(1) << n
+ for c := uint64(1); c < max; c++ {
+ if !umagicOK(n, int64(c)) {
+ continue
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+ for i := uint64(0); i < max; i++ {
+ want := i / c
+ got := (i * (max + m)) >> (n + uint(s))
+ if want != got {
+ t.Errorf("unsigned magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+
+func TestMagicUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !umagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+
+ C := new(big.Int).SetUint64(c)
+ M := new(big.Int).SetUint64(m)
+ M.Add(M, TwoN)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("umagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func TestMagicSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !smagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := smagic(n, int64(c)).m
+ s := smagic(n, int64(c)).s
+
+ C := new(big.Int).SetInt64(c)
+ M := new(big.Int).SetUint64(m)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if x < 0 {
+ Got.Add(Got, One)
+ }
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("smagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustiveU(t *testing.T, n uint) {
+ maxU := uint64(1) << n
+ for c := uint64(1); c < maxU; c++ {
+ if !udivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := uint64(0); i < maxU; i++ {
+ want := i%c == 0
+ mul := (i * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", i, c, got, want, k, m, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8U(t *testing.T) {
+ testDivisibleExhaustiveU(t, 8)
+}
+
+func TestDivisibleExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustiveU(t, 16)
+}
+
+func TestDivisibleUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // c too large for the given n.
+ }
+ if !udivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetUint64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (x * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", x, c, got, want, k, m, max)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustive(t *testing.T, n uint) {
+ minI := -int64(1) << (n - 1)
+ maxI := int64(1) << (n - 1)
+ for c := int64(1); c < maxI; c++ {
+ if !sdivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := minI; i < maxI; i++ {
+ want := i%c == 0
+ mul := (uint64(i)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", i, c, got, want, k, m, a, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8(t *testing.T) {
+ testDivisibleExhaustive(t, 8)
+}
+
+func TestDivisibleExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustive(t, 16)
+}
+
+func TestDivisibleSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !sdivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetInt64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (uint64(x)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", x, c, got, want, k, m, a, max)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
new file mode 100644
index 0000000..d1bad52
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -0,0 +1,336 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// nilcheckelim eliminates unnecessary nil checks.
+// runs on machine-independent code.
+func nilcheckelim(f *Func) {
+ // A nil check is redundant if the same nil check was successful in a
+ // dominating block. The efficacy of this pass depends heavily on the
+ // efficacy of the cse pass.
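+ // For intuition, a source-level sketch (illustrative only; the pass works
+ // on SSA values, not Go source):
+ //
+ //	if p == nil {
+ //		return
+ //	}
+ //	_ = *p // the implicit nil check for this dereference is dominated by
+ //	       // the explicit test above, so it can be removed.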
+ sdom := f.Sdom()
+
+ // TODO: Eliminate more nil checks.
+ // We can recursively remove any chain of fixed offset calculations,
+ // i.e. struct fields and array elements, even with non-constant
+ // indices: x is non-nil iff x.a.b[i].c is.
+
+ type walkState int
+ const (
+ Work walkState = iota // process nil checks and traverse to dominees
+ ClearPtr // forget the fact that ptr is nil
+ )
+
+ type bp struct {
+ block *Block // block, or nil in ClearPtr state
+ ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
+ op walkState
+ }
+
+ work := make([]bp, 0, 256)
+ work = append(work, bp{block: f.Entry})
+
+ // map from value ID to bool indicating if value is known to be non-nil
+ // in the current dominator path being walked. This slice is updated by
+ // walkStates to maintain the known non-nil values.
+ nonNilValues := make([]bool, f.NumValues())
+
+ // make an initial pass identifying any non-nil values
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // a value resulting from taking the address of a
+ // value, or a value constructed from an offset of a
+ // non-nil ptr (OpAddPtr) implies it is non-nil
+ // We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180.
+ // We assume that SlicePtr is non-nil because we do a bounds check
+ // before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
+ if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
+ nonNilValues[v.ID] = true
+ }
+ }
+ }
+
+ for changed := true; changed; {
+ changed = false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // phis whose arguments are all non-nil
+ // are non-nil
+ if v.Op == OpPhi {
+ argsNonNil := true
+ for _, a := range v.Args {
+ if !nonNilValues[a.ID] {
+ argsNonNil = false
+ break
+ }
+ }
+ if argsNonNil {
+ if !nonNilValues[v.ID] {
+ changed = true
+ }
+ nonNilValues[v.ID] = true
+ }
+ }
+ }
+ }
+ }
+
+ // allocate auxiliary data structures for computing store order
+ sset := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ storeNumber := make([]int32, f.NumValues())
+
+ // perform a depth first walk of the dominee tree
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+
+ switch node.op {
+ case Work:
+ b := node.block
+
+ // First, see if we're dominated by an explicit nil check.
+ if len(b.Preds) == 1 {
+ p := b.Preds[0].b
+ if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
+ if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
+ nonNilValues[ptr.ID] = true
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ }
+ }
+ }
+
+ // Next, order values in the current block w.r.t. stores.
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Next, process values in the block.
+ i := 0
+ for _, v := range b.Values {
+ b.Values[i] = v
+ i++
+ switch v.Op {
+ case OpIsNonNil:
+ ptr := v.Args[0]
+ if nonNilValues[ptr.ID] {
+ if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
+ pendingLines.add(v.Pos)
+ v.Pos = v.Pos.WithNotStmt()
+ }
+ // This is a redundant explicit nil check.
+ v.reset(OpConstBool)
+ v.AuxInt = 1 // true
+ }
+ case OpNilCheck:
+ ptr := v.Args[0]
+ if nonNilValues[ptr.ID] {
+ // This is a redundant implicit nil check.
+ // Logging in the style of the former compiler -- and omit line 1,
+ // which is usually in generated code.
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
+ pendingLines.add(v.Pos)
+ }
+ v.reset(OpUnknown)
+ f.freeValue(v)
+ i--
+ continue
+ }
+ // Record the fact that we know ptr is non nil, and remember to
+ // undo that information when this dominator subtree is done.
+ nonNilValues[ptr.ID] = true
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
+ default:
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ }
+ // This reduces the lost statement count in "go" by 5 (out of 500 total).
+ for j := 0; j < i; j++ { // is this an ordering problem?
+ v := b.Values[j]
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+ b.truncateValues(i)
+
+ // Add all dominated blocks to the work list.
+ for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
+ work = append(work, bp{op: Work, block: w})
+ }
+
+ case ClearPtr:
+ nonNilValues[node.ptr.ID] = false
+ continue
+ }
+ }
+}
+
+// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
+//
+// This should agree with minLegalPointer in the runtime.
+const minZeroPage = 4096
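+
+// For example, a load of a field at offset 8 from a nil pointer dereferences
+// address 8, which is below minZeroPage and is therefore guaranteed to fault.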
+
+// faultOnLoad is true if a load to an address below minZeroPage will trigger a SIGSEGV.
+var faultOnLoad = objabi.GOOS != "aix"
+
+// nilcheckelim2 eliminates unnecessary nil checks.
+// Runs after lowering and scheduling.
+func nilcheckelim2(f *Func) {
+ unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
+ defer f.retSparseMap(unnecessary)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+
+ for _, b := range f.Blocks {
+ // Walk the block backwards. Find instructions that will fault if their
+ // input pointer is nil. Remove nil checks on those pointers, as the
+ // faulting instruction effectively does the nil check for free.
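+ // For example, once the backward walk has seen a load or store through a
+ // pointer at a small offset, a nil check of that pointer earlier in the
+ // block is redundant: if the pointer were nil, the memory op itself would
+ // fault and raise the same nil-pointer panic.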
+ unnecessary.clear()
+ pendingLines.clear()
+ // Optimization: keep track of removed nilcheck with smallest index
+ firstToRemove := len(b.Values)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ // For bug 33724, policy is that we might choose to bump an existing position
+ // off the faulting load/store in favor of the one from the nil check.
+
+ // Iteration order means that first nilcheck in the chain wins, others
+ // are bumped into the ordinary statement preservation algorithm.
+ u := b.Values[unnecessary.get(v.Args[0].ID)]
+ if !u.Pos.SameFileAndLine(v.Pos) {
+ if u.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(u.Pos)
+ }
+ u.Pos = v.Pos
+ } else if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(v.Pos)
+ }
+
+ v.reset(OpUnknown)
+ firstToRemove = i
+ continue
+ }
+ if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(GCNode).Typ().HasPointers()) {
+ // These ops don't really change memory.
+ continue
+ // Note: OpVarDef requires that the defined variable not have pointers.
+ // We need to make sure that there's no possible faulting
+ // instruction between a VarDef and that variable being
+ // fully initialized. If there was, then anything scanning
+ // the stack during the handling of that fault will see
+ // a live but uninitialized pointer variable on the stack.
+ //
+ // If we have:
+ //
+ // NilCheck p
+ // VarDef x
+ // x = *p
+ //
+ // We can't rewrite that to
+ //
+ // VarDef x
+ // NilCheck p
+ // x = *p
+ //
+ // Particularly, even though *p faults on p==nil, we still
+ // have to do the explicit nil check before the VarDef.
+ // See issue #32288.
+ }
+ // This op changes memory. Any faulting instruction after v that
+ // we've recorded in the unnecessary map is now obsolete.
+ unnecessary.clear()
+ }
+
+ // Find any pointers that this op is guaranteed to fault on if nil.
+ var ptrstore [2]*Value
+ ptrs := ptrstore[:0]
+ if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) {
+ // On AIX, only writing will fault.
+ ptrs = append(ptrs, v.Args[0])
+ }
+ if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) {
+ // On AIX, only writing will fault.
+ // LoweredMove is a special case: it counts as a "mem" op because it stores to arg0, but arg1 is read as a load and must still be checked.
+ ptrs = append(ptrs, v.Args[1])
+ }
+
+ for _, ptr := range ptrs {
+ // Check to make sure the offset is small.
+ switch opcodeTable[v.Op].auxType {
+ case auxSym:
+ if v.Aux != nil {
+ continue
+ }
+ case auxSymOff:
+ if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
+ continue
+ }
+ case auxSymValAndOff:
+ off := ValAndOff(v.AuxInt).Off()
+ if v.Aux != nil || off < 0 || off >= minZeroPage {
+ continue
+ }
+ case auxInt32:
+ // Mips uses this auxType for atomic add constant. It does not affect the effective address.
+ case auxInt64:
+ // ARM uses this auxType for duffcopy/duffzero/alignment info.
+ // It does not affect the effective address.
+ case auxNone:
+ // offset is zero.
+ default:
+ v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
+ }
+ // This instruction is guaranteed to fault if ptr is nil.
+ // Any previous nil check op is unnecessary.
+ unnecessary.set(ptr.ID, int32(i), src.NoXPos)
+ }
+ }
+ // Remove values we've clobbered with OpUnknown.
+ i := firstToRemove
+ for j := i; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op != OpUnknown {
+ if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ b.Values[i] = v
+ i++
+ }
+ }
+
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+
+ b.truncateValues(i)
+
+ // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
+ // more unnecessary nil checks. Would fix test/nilptr3.go:159.
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
new file mode 100644
index 0000000..16d9461
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -0,0 +1,434 @@
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "strconv"
+ "testing"
+)
+
+func BenchmarkNilCheckDeep1(b *testing.B) { benchmarkNilCheckDeep(b, 1) }
+func BenchmarkNilCheckDeep10(b *testing.B) { benchmarkNilCheckDeep(b, 10) }
+func BenchmarkNilCheckDeep100(b *testing.B) { benchmarkNilCheckDeep(b, 100) }
+func BenchmarkNilCheckDeep1000(b *testing.B) { benchmarkNilCheckDeep(b, 1000) }
+func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) }
+
+// benchmarkNilCheckDeep is a stress test of nilcheckelim.
+// It uses the worst possible input: A linear string of
+// nil checks, none of which can be eliminated.
+// Run with multiple depths to observe big-O behavior.
+func benchmarkNilCheckDeep(b *testing.B, depth int) {
+ c := testConfig(b)
+ ptrType := c.config.Types.BytePtr
+
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < depth; i++ {
+ blocs = append(blocs,
+ Bloc(blockn(i),
+ Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(booln(i), OpIsNonNil, c.config.Types.Bool, 0, nil, ptrn(i)),
+ If(booln(i), blockn(i+1), "exit"),
+ ),
+ )
+ }
+ blocs = append(blocs,
+ Bloc(blockn(depth), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ fun := c.Fun("entry", blocs...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(depth)) // helps for eyeballing linearity
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ nilcheckelim(fun.f)
+ }
+}
+
+func blockn(n int) string { return "b" + strconv.Itoa(n) }
+func ptrn(n int) string { return "p" + strconv.Itoa(n) }
+func booln(n int) string { return "c" + strconv.Itoa(n) }
+
+func isNilCheck(b *Block) bool {
+ return b.Kind == BlockIf && b.Controls[0].Op == OpIsNonNil
+}
+
+// TestNilcheckSimple verifies that a second repeated nilcheck is removed.
+func TestNilcheckSimple(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent
+// on the order of the dominees.
+func TestNilcheckDomOrder(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddr verifies that nil checks of OpAddr-constructed values are removed.
+func TestNilcheckAddr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddPtr verifies that nil checks of OpAddPtr-constructed values are removed.
+func TestNilcheckAddPtr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("off", OpConst64, c.config.Types.Int64, 20, nil),
+ Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckPhi tests that nil checks of phis whose arguments are all known to be
+// non-nil are removed.
+func TestNilcheckPhi(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, "b", "sp", "mem"),
+ Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
+ If("bool1", "b1", "b2")),
+ Bloc("b1",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ Bloc("b2",
+ Valu("ptr2", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ // both ptr1 and ptr2 are guaranteed non-nil here
+ Bloc("checkPtr",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "phi"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer
+// are removed, but checks of different pointers are not.
+func TestNilcheckKeepRemove(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "differentCheck", "exit")),
+ Bloc("differentCheck",
+ Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr2"),
+ If("bool2", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundDifferentCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ if b == fun.blocks["differentCheck"] && isNilCheck(b) {
+ foundDifferentCheck = true
+ }
+ }
+ if !foundDifferentCheck {
+ t.Errorf("removed differentCheck, but shouldn't have")
+ }
+}
+
+// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck
+// block are *not* removed.
+func TestNilcheckInFalseBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "thirdCheck")),
+ Bloc("thirdCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ foundThirdCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ if b == fun.blocks["thirdCheck"] && isNilCheck(b) {
+ foundThirdCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("removed secondCheck, but shouldn't have [false branch]")
+ }
+ if !foundThirdCheck {
+ t.Errorf("removed thirdCheck, but shouldn't have [false branch]")
+ }
+}
+
+// TestNilcheckUser verifies that a user nil check that dominates a generated nil check
+// will remove the generated nil check.
+func TestNilcheckUser(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big
+func TestNilcheckBug(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "couldBeNil")),
+ Bloc("couldBeNil",
+ Goto("secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ // prevent fuse from eliminating this block
+ Valu("store", OpStore, types.TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "mem", "store"),
+ Exit("phi")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("secondCheck was eliminated, but shouldn't have")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
new file mode 100644
index 0000000..f4e62b8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -0,0 +1,271 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+func isPoorStatementOp(op Op) bool {
+ switch op {
+ // Note that Nilcheck often vanishes, but when it doesn't, you'd love to start the statement there
+ // so that a debugger-user sees the stop before the panic, and can examine the value.
+ case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData,
+ OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4,
+ OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F:
+ return true
+ }
+ return false
+}
+
+// LosesStmtMark reports whether a prog with opcode as loses its statement mark on the way to DWARF.
+// The attributes from some opcodes are lost in translation.
+// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC.
+// Should try to fix it there.
+func LosesStmtMark(as obj.As) bool {
+ // is_stmt does not work for these; it DOES for ANOP even though that generates no code.
+ return as == obj.APCDATA || as == obj.AFUNCDATA
+}
+
+// nextGoodStatementIndex returns an index at i or later that is believed
+// to be a good place to start the statement for b. This decision is
+// based on v's Op, the possibility of a better later operation, and
+// whether the values following i are on the same line as v.
+// If a better statement index isn't found, then i is returned.
+func nextGoodStatementIndex(v *Value, i int, b *Block) int {
+ // If the value is the last one in the block, too bad, it will have to do
+ // (this assumes that the value ordering vaguely corresponds to the source
+ // program execution order, which tends to be true directly after ssa is
+ // first built).
+ if i >= len(b.Values)-1 {
+ return i
+ }
+ // Skip the likely-ephemeral/fragile opcodes expected to vanish in a rewrite.
+ if !isPoorStatementOp(v.Op) {
+ return i
+ }
+ // Look ahead to see what the line number is on the next thing that could be a boundary.
+ for j := i + 1; j < len(b.Values); j++ {
+ u := b.Values[j]
+ if u.Pos.IsStmt() == src.PosNotStmt { // ignore non-statements
+ continue
+ }
+ if u.Pos.SameFileAndLine(v.Pos) {
+ if isPoorStatementOp(u.Op) {
+ continue // Keep looking, this is also not a good statement op
+ }
+ return j
+ }
+ return i
+ }
+ return i
+}
+
+// notStmtBoundary reports whether a value with opcode op can never be a statement
+// boundary. Such values don't correspond to a user's understanding of a
+// statement boundary.
+func notStmtBoundary(op Op) bool {
+ switch op {
+ case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg:
+ return true
+ }
+ return false
+}
+
+func (b *Block) FirstPossibleStmtValue() *Value {
+ for _, v := range b.Values {
+ if notStmtBoundary(v.Op) {
+ continue
+ }
+ return v
+ }
+ return nil
+}
+
+func flc(p src.XPos) string {
+ if p == src.NoXPos {
+ return "none"
+ }
+ return fmt.Sprintf("(%d):%d:%d", p.FileIndex(), p.Line(), p.Col())
+}
+
+type fileAndPair struct {
+ f int32
+ lp lineRange
+}
+
+type fileAndPairs []fileAndPair
+
+func (fap fileAndPairs) Len() int {
+ return len(fap)
+}
+func (fap fileAndPairs) Less(i, j int) bool {
+ return fap[i].f < fap[j].f
+}
+func (fap fileAndPairs) Swap(i, j int) {
+ fap[i], fap[j] = fap[j], fap[i]
+}
+
+// -d=ssa/number_lines/stats=1 (that bit) for line and file distribution statistics
+// -d=ssa/number_lines/debug for information about why particular values are marked as statements.
+func numberLines(f *Func) {
+ po := f.Postorder()
+ endlines := make(map[ID]src.XPos)
+ ranges := make(map[int]lineRange)
+ note := func(p src.XPos) {
+ line := uint32(p.Line())
+ i := int(p.FileIndex())
+ lp, found := ranges[i]
+ change := false
+ if line < lp.first || !found {
+ lp.first = line
+ change = true
+ }
+ if line > lp.last {
+ lp.last = line
+ change = true
+ }
+ if change {
+ ranges[i] = lp
+ }
+ }
+
+ // Visit in reverse post order so that all non-loop predecessors come first.
+ for j := len(po) - 1; j >= 0; j-- {
+ b := po[j]
+ // Find the first interesting position and check to see if it differs from any predecessor
+ firstPos := src.NoXPos
+ firstPosIndex := -1
+ if b.Pos.IsStmt() != src.PosNotStmt {
+ note(b.Pos)
+ }
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ note(v.Pos)
+ // skip ahead to better instruction for this line if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ firstPosIndex = i
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt() // default to the default marking; may be promoted to a statement boundary below
+ break
+ }
+ }
+
+ if firstPosIndex == -1 { // Effectively empty block, check block's own Pos, consider preds.
+ line := src.NoXPos
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(line) {
+ if line == src.NoXPos {
+ line = endlines[pbi]
+ continue
+ } else {
+ line = src.NoXPos
+ break
+ }
+
+ }
+ }
+ // If the block has no statement itself and is effectively empty, tag it w/ predecessor(s) but not as a statement
+ if b.Pos.IsStmt() == src.PosNotStmt {
+ b.Pos = line
+ endlines[b.ID] = line
+ continue
+ }
+ // If the block differs from its predecessors, mark it as a statement
+ if line == src.NoXPos || !line.SameFileAndLine(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt effectively-empty-block %s %s %s\n", f.Name, b, flc(b.Pos))
+ }
+ }
+ endlines[b.ID] = b.Pos
+ continue
+ }
+ // check predecessors for any difference; if firstPos differs, then it is a boundary.
+ if len(b.Preds) == 0 { // Don't forget the entry block
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt entry-block %s %s %s %s\n", f.Name, b, b.Values[firstPosIndex], flc(firstPos))
+ }
+ } else { // differing pred
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(firstPos) {
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt differing-pred %s %s %s %s, different=%s ending %s\n",
+ f.Name, b, b.Values[firstPosIndex], flc(firstPos), p.Block(), flc(endlines[pbi]))
+ }
+ break
+ }
+ }
+ }
+ // iterate forward setting each new (interesting) position as a statement boundary.
+ for i := firstPosIndex + 1; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ note(v.Pos)
+ // skip ahead if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ if !v.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt new line %s %s %s %s prev pos = %s\n", f.Name, b, v, flc(v.Pos), flc(firstPos))
+ }
+ firstPos = v.Pos
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ v.Pos = v.Pos.WithDefaultStmt()
+ }
+ }
+ if b.Pos.IsStmt() != src.PosNotStmt && !b.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt end of block differs %s %s %s prev pos = %s\n", f.Name, b, flc(b.Pos), flc(firstPos))
+ }
+ b.Pos = b.Pos.WithIsStmt()
+ firstPos = b.Pos
+ }
+ endlines[b.ID] = firstPos
+ }
+ if f.pass.stats&1 != 0 {
+ // Report summary statistics on the shape of the sparse map about to be constructed
+ // TODO use this information to make sparse maps faster.
+ var entries fileAndPairs
+ for k, v := range ranges {
+ entries = append(entries, fileAndPair{int32(k), v})
+ }
+ sort.Sort(entries)
+ total := uint64(0) // sum over files of maxline(file) - minline(file)
+ maxfile := int32(0) // max(file indices)
+ minline := uint32(0xffffffff) // min over files of minline(file)
+ maxline := uint32(0) // max over files of maxline(file)
+ for _, v := range entries {
+ if f.pass.stats > 1 {
+ f.LogStat("file", v.f, "low", v.lp.first, "high", v.lp.last)
+ }
+ total += uint64(v.lp.last - v.lp.first)
+ if maxfile < v.f {
+ maxfile = v.f
+ }
+ if minline > v.lp.first {
+ minline = v.lp.first
+ }
+ if maxline < v.lp.last {
+ maxline = v.lp.last
+ }
+ }
+ f.LogStat("SUM_LINE_RANGE", total, "MAXMIN_LINE_RANGE", maxline-minline, "MAXFILE", maxfile, "NFILES", len(entries))
+ }
+ // cachedLineStarts is an empty sparse map for values that are included within ranges.
+ f.cachedLineStarts = newXposmap(ranges)
+}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
new file mode 100644
index 0000000..d167335
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -0,0 +1,405 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "fmt"
+)
+
+// An Op encodes the specific operation that a Value performs.
+// Opcodes' semantics can be modified by the type and aux fields of the Value.
+// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type.
+// Semantics of each op are described in the opcode files in gen/*Ops.go.
+// There is one file for generic (architecture-independent) ops and one file
+// for each architecture.
+type Op int32
+
+type opInfo struct {
+ name string
+ reg regInfo
+ auxType auxType
+ argLen int32 // the number of arguments, -1 if variable length
+ asm obj.As
+ generic bool // this is a generic (arch-independent) opcode
+ rematerializeable bool // this op is rematerializeable
+ commutative bool // this operation is commutative (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ call bool // is a function call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ usesScratch bool // this op requires scratch memory space
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect SymEffect // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type inputInfo struct {
+ idx int // index in Args array
+ regs regMask // allowed input registers
+}
+
+type outputInfo struct {
+ idx int // index in output tuple
+ regs regMask // allowed output registers
+}
+
+type regInfo struct {
+ // inputs encodes the register restrictions for an instruction's inputs.
+ // Each entry specifies an allowed register set for a particular input.
+ // They are listed in the order in which regalloc should pick a register
+ // from the register set (most constrained first).
+ // Inputs which do not need registers are not listed.
+ inputs []inputInfo
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs is the same as inputs, but for the outputs of the instruction.
+ outputs []outputInfo
+}
+
+type auxType int8
+
+type Param struct {
+ Type *types.Type
+ Offset int32 // TODO someday this will be a register
+}
+
+type AuxCall struct {
+ Fn *obj.LSym
+ args []Param // Includes receiver for method calls. Does NOT include hidden closure pointer.
+ results []Param
+}
+
+// ResultForOffset returns the index of the result at a particular offset among the results.
+// This does not include the mem result for the call opcode.
+func (a *AuxCall) ResultForOffset(offset int64) int64 {
+ which := int64(-1)
+ for i := int64(0); i < a.NResults(); i++ { // note aux NResults does not include mem result.
+ if a.OffsetOfResult(i) == offset {
+ which = i
+ break
+ }
+ }
+ return which
+}
+
+// OffsetOfResult returns the SP offset of result which (indexed 0, 1, etc).
+func (a *AuxCall) OffsetOfResult(which int64) int64 {
+ return int64(a.results[which].Offset)
+}
+
+// OffsetOfArg returns the SP offset of argument which (indexed 0, 1, etc).
+func (a *AuxCall) OffsetOfArg(which int64) int64 {
+ return int64(a.args[which].Offset)
+}
+
+// TypeOfResult returns the type of result which (indexed 0, 1, etc).
+func (a *AuxCall) TypeOfResult(which int64) *types.Type {
+ return a.results[which].Type
+}
+
+// TypeOfArg returns the type of argument which (indexed 0, 1, etc).
+func (a *AuxCall) TypeOfArg(which int64) *types.Type {
+ return a.args[which].Type
+}
+
+// SizeOfResult returns the size of result which (indexed 0, 1, etc).
+func (a *AuxCall) SizeOfResult(which int64) int64 {
+ return a.TypeOfResult(which).Width
+}
+
+// SizeOfArg returns the size of argument which (indexed 0, 1, etc).
+func (a *AuxCall) SizeOfArg(which int64) int64 {
+ return a.TypeOfArg(which).Width
+}
+
+// NResults returns the number of results
+func (a *AuxCall) NResults() int64 {
+ return int64(len(a.results))
+}
+
+// LateExpansionResultType returns the result type (including trailing mem)
+// for a call that will be expanded later in the SSA phase.
+func (a *AuxCall) LateExpansionResultType() *types.Type {
+ var tys []*types.Type
+ for i := int64(0); i < a.NResults(); i++ {
+ tys = append(tys, a.TypeOfResult(i))
+ }
+ tys = append(tys, types.TypeMem)
+ return types.NewResults(tys)
+}
+
+// NArgs returns the number of arguments
+func (a *AuxCall) NArgs() int64 {
+ return int64(len(a.args))
+}
+
+// String returns
+// "AuxCall{<fn>(<args>)}" if len(results) == 0;
+// "AuxCall{<fn>(<args>)<results[0]>}" if len(results) == 1;
+// "AuxCall{<fn>(<args>)(<results>)}" otherwise.
+func (a *AuxCall) String() string {
+ var fn string
+ if a.Fn == nil {
+ fn = "AuxCall{nil" // could be interface/closure etc.
+ } else {
+ fn = fmt.Sprintf("AuxCall{%v", a.Fn)
+ }
+
+ if len(a.args) == 0 {
+ fn += "()"
+ } else {
+ s := "("
+ for _, arg := range a.args {
+ fn += fmt.Sprintf("%s[%v,%v]", s, arg.Type, arg.Offset)
+ s = ","
+ }
+ fn += ")"
+ }
+
+ if len(a.results) > 0 { // usual is zero or one; only some RT calls have more than one.
+ if len(a.results) == 1 {
+ fn += fmt.Sprintf("[%v,%v]", a.results[0].Type, a.results[0].Offset)
+ } else {
+ s := "("
+ for _, result := range a.results {
+ fn += fmt.Sprintf("%s[%v,%v]", s, result.Type, result.Offset)
+ s = ","
+ }
+ fn += ")"
+ }
+ }
+
+ return fn + "}"
+}
+
+// StaticAuxCall returns an AuxCall for a static call.
+func StaticAuxCall(sym *obj.LSym, args []Param, results []Param) *AuxCall {
+ return &AuxCall{Fn: sym, args: args, results: results}
+}
+
+// InterfaceAuxCall returns an AuxCall for an interface call.
+func InterfaceAuxCall(args []Param, results []Param) *AuxCall {
+ return &AuxCall{Fn: nil, args: args, results: results}
+}
+
+// ClosureAuxCall returns an AuxCall for a closure call.
+func ClosureAuxCall(args []Param, results []Param) *AuxCall {
+ return &AuxCall{Fn: nil, args: args, results: results}
+}
+
+const (
+ auxNone auxType = iota
+ auxBool // auxInt is 0/1 for false/true
+ auxInt8 // auxInt is an 8-bit integer
+ auxInt16 // auxInt is a 16-bit integer
+ auxInt32 // auxInt is a 32-bit integer
+ auxInt64 // auxInt is a 64-bit integer
+ auxInt128 // auxInt represents a 128-bit integer. Always 0.
+ auxUInt8 // auxInt is an 8-bit unsigned integer
+ auxFloat32 // auxInt is a float32 (encoded with math.Float64bits)
+ auxFloat64 // auxInt is a float64 (encoded with math.Float64bits)
+ auxFlagConstant // auxInt is a flagConstant
+ auxString // aux is a string
+ auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none)
+ auxSymOff // aux is a symbol, auxInt is an offset
+ auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
+ auxTyp // aux is a type
+ auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt
+ auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan)
+ auxCall // aux is a *ssa.AuxCall
+ auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size
+
+ // architecture specific aux types
+ auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt
+ auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
+ auxS390XCCMask // aux is a s390x 4-bit condition code mask
+ auxS390XCCMaskInt8 // aux is a s390x 4-bit condition code mask, auxInt is a int8 immediate
+ auxS390XCCMaskUint8 // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
+)
+
+// A SymEffect describes the effect that an SSA Value has on the variable
+// identified by the symbol in its Aux field.
+type SymEffect int8
+
+const (
+ SymRead SymEffect = 1 << iota
+ SymWrite
+ SymAddr
+
+ SymRdWr = SymRead | SymWrite
+
+ SymNone SymEffect = 0
+)
+
+// A Sym represents a symbolic offset from a base register.
+// Currently a Sym can be one of 3 things:
+// - a *gc.Node, for an offset from SP (the stack pointer)
+// - a *obj.LSym, for an offset from SB (the global pointer)
+// - nil, for no offset
+type Sym interface {
+ String() string
+ CanBeAnSSASym()
+}
+
+// A ValAndOff is used by several opcodes. It holds
+// both a value and a pointer offset.
+// A ValAndOff is intended to be encoded into an AuxInt field.
+// The zero ValAndOff encodes a value of 0 and an offset of 0.
+// The high 32 bits hold a value.
+// The low 32 bits hold a pointer offset.
+type ValAndOff int64
+
+func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
+func (x ValAndOff) Val32() int32 { return int32(int64(x) >> 32) }
+func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) }
+func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) }
+
+func (x ValAndOff) Off() int64 { return int64(int32(x)) }
+func (x ValAndOff) Off32() int32 { return int32(x) }
+
+func (x ValAndOff) String() string {
+ return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
+}
+
+// validVal reports whether the value can be used
+// as an argument to makeValAndOff.
+func validVal(val int64) bool {
+ return val == int64(int32(val))
+}
+
+// validOff reports whether the offset can be used
+// as an argument to makeValAndOff.
+func validOff(off int64) bool {
+ return off == int64(int32(off))
+}
+
+// validValAndOff reports whether we can fit the value and offset into
+// a ValAndOff value.
+func validValAndOff(val, off int64) bool {
+ if !validVal(val) {
+ return false
+ }
+ if !validOff(off) {
+ return false
+ }
+ return true
+}
+
+func makeValAndOff32(val, off int32) ValAndOff {
+ return ValAndOff(int64(val)<<32 + int64(uint32(off)))
+}
+func makeValAndOff64(val, off int64) ValAndOff {
+ if !validValAndOff(val, off) {
+ panic("invalid makeValAndOff64")
+ }
+ return ValAndOff(val<<32 + int64(uint32(off)))
+}
+
+func (x ValAndOff) canAdd32(off int32) bool {
+ newoff := x.Off() + int64(off)
+ return newoff == int64(int32(newoff))
+}
+func (x ValAndOff) canAdd64(off int64) bool {
+ newoff := x.Off() + off
+ return newoff == int64(int32(newoff))
+}
+
+func (x ValAndOff) addOffset32(off int32) ValAndOff {
+ if !x.canAdd32(off) {
+ panic("invalid ValAndOff.addOffset32")
+ }
+ return makeValAndOff64(x.Val(), x.Off()+int64(off))
+}
+func (x ValAndOff) addOffset64(off int64) ValAndOff {
+ if !x.canAdd64(off) {
+ panic("invalid ValAndOff.addOffset64")
+ }
+ return makeValAndOff64(x.Val(), x.Off()+off)
+}
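+
+// valAndOffExample is an illustrative sketch (hypothetical, unused by the
+// compiler) of how the 32/32 packing above round-trips through the helpers.
+func valAndOffExample() {
+ vo := makeValAndOff32(5, 16) // val=5 in the high 32 bits, off=16 in the low 32 bits
+ _ = vo.Val()                 // 5
+ _ = vo.Off()                 // 16
+ vo = vo.addOffset32(8)       // val stays 5, offset becomes 24
+ _ = vo.String()              // "val=5,off=24"
+}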
+
+// int128 is a type that stores a 128-bit constant.
+// The only allowed constant right now is 0, so we can cheat quite a bit.
+type int128 int64
+
+type BoundsKind uint8
+
+const (
+ BoundsIndex BoundsKind = iota // indexing operation, 0 <= idx < len failed
+ BoundsIndexU // ... with unsigned idx
+ BoundsSliceAlen // 2-arg slicing operation, 0 <= high <= len failed
+ BoundsSliceAlenU // ... with unsigned high
+ BoundsSliceAcap // 2-arg slicing operation, 0 <= high <= cap failed
+ BoundsSliceAcapU // ... with unsigned high
+ BoundsSliceB // 2-arg slicing operation, 0 <= low <= high failed
+ BoundsSliceBU // ... with unsigned low
+ BoundsSlice3Alen // 3-arg slicing operation, 0 <= max <= len failed
+ BoundsSlice3AlenU // ... with unsigned max
+ BoundsSlice3Acap // 3-arg slicing operation, 0 <= max <= cap failed
+ BoundsSlice3AcapU // ... with unsigned max
+ BoundsSlice3B // 3-arg slicing operation, 0 <= high <= max failed
+ BoundsSlice3BU // ... with unsigned high
+ BoundsSlice3C // 3-arg slicing operation, 0 <= low <= high failed
+ BoundsSlice3CU // ... with unsigned low
+ BoundsKindCount
+)
+
+// boundsABI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
+// CMPQ c, cap
+// JA fail1
+// CMPQ b, c
+// JA fail2
+// CMPQ a, b
+// JA fail3
+//
+// fail1: CALL panicSlice3Acap (c, cap)
+// fail2: CALL panicSlice3B (b, c)
+// fail3: CALL panicSlice3C (a, b)
+//
+// When we register allocate that code, we want the same register to be used for
+// the first arg of panicSlice3Acap and the second arg to panicSlice3B. That way,
+// initializing that register once will satisfy both calls.
+// That desire ends up dividing the set of bounds check calls into 3 sets. This function
+// determines which set to use for a given panic call.
+// The first arg for set 0 should be the second arg for set 1.
+// The first arg for set 1 should be the second arg for set 2.
+func boundsABI(b int64) int {
+ switch BoundsKind(b) {
+ case BoundsSlice3Alen,
+ BoundsSlice3AlenU,
+ BoundsSlice3Acap,
+ BoundsSlice3AcapU:
+ return 0
+ case BoundsSliceAlen,
+ BoundsSliceAlenU,
+ BoundsSliceAcap,
+ BoundsSliceAcapU,
+ BoundsSlice3B,
+ BoundsSlice3BU:
+ return 1
+ case BoundsIndex,
+ BoundsIndexU,
+ BoundsSliceB,
+ BoundsSliceBU,
+ BoundsSlice3C,
+ BoundsSlice3CU:
+ return 2
+ default:
+ panic("bad BoundsKind")
+ }
+}
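+
+// boundsABIExample is an illustrative sketch (hypothetical, unused by the
+// compiler) showing the three call sets boundsABI assigns to representative
+// BoundsKind values.
+func boundsABIExample() (int, int, int) {
+ a := boundsABI(int64(BoundsSlice3Acap)) // 0: "max vs cap" failures in 3-arg slicing
+ b := boundsABI(int64(BoundsSlice3B))    // 1: "high vs max" failures, plus the 2-arg len/cap checks
+ c := boundsABI(int64(BoundsIndex))      // 2: index failures and "low vs high" checks
+ return a, b, c
+}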
+
+// arm64BitField is the Go type of the ARM64BitField auxInt.
+// If x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and
+// width+lsb<64 for the 64-bit variant, width+lsb<32 for the 32-bit variant.
+// The meaning of width and lsb is instruction-dependent.
+type arm64BitField int16
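+
+// arm64BitFieldParts is an illustrative sketch (hypothetical, unused by the
+// compiler) that decodes an arm64BitField auxInt per the packing described above.
+func arm64BitFieldParts(x arm64BitField) (lsb, width int64) {
+ width = int64(x) & 0xff
+ lsb = (int64(x) >> 8) & 0xff
+ return lsb, width
+}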
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
new file mode 100644
index 0000000..a3f9a22
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -0,0 +1,36677 @@
+// Code generated from gen/*Ops.go; DO NOT EDIT.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/mips"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/riscv"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/wasm"
+ "cmd/internal/obj/x86"
+)
+
+const (
+ BlockInvalid BlockKind = iota
+
+ Block386EQ
+ Block386NE
+ Block386LT
+ Block386LE
+ Block386GT
+ Block386GE
+ Block386OS
+ Block386OC
+ Block386ULT
+ Block386ULE
+ Block386UGT
+ Block386UGE
+ Block386EQF
+ Block386NEF
+ Block386ORD
+ Block386NAN
+
+ BlockAMD64EQ
+ BlockAMD64NE
+ BlockAMD64LT
+ BlockAMD64LE
+ BlockAMD64GT
+ BlockAMD64GE
+ BlockAMD64OS
+ BlockAMD64OC
+ BlockAMD64ULT
+ BlockAMD64ULE
+ BlockAMD64UGT
+ BlockAMD64UGE
+ BlockAMD64EQF
+ BlockAMD64NEF
+ BlockAMD64ORD
+ BlockAMD64NAN
+
+ BlockARMEQ
+ BlockARMNE
+ BlockARMLT
+ BlockARMLE
+ BlockARMGT
+ BlockARMGE
+ BlockARMULT
+ BlockARMULE
+ BlockARMUGT
+ BlockARMUGE
+ BlockARMLTnoov
+ BlockARMLEnoov
+ BlockARMGTnoov
+ BlockARMGEnoov
+
+ BlockARM64EQ
+ BlockARM64NE
+ BlockARM64LT
+ BlockARM64LE
+ BlockARM64GT
+ BlockARM64GE
+ BlockARM64ULT
+ BlockARM64ULE
+ BlockARM64UGT
+ BlockARM64UGE
+ BlockARM64Z
+ BlockARM64NZ
+ BlockARM64ZW
+ BlockARM64NZW
+ BlockARM64TBZ
+ BlockARM64TBNZ
+ BlockARM64FLT
+ BlockARM64FLE
+ BlockARM64FGT
+ BlockARM64FGE
+ BlockARM64LTnoov
+ BlockARM64LEnoov
+ BlockARM64GTnoov
+ BlockARM64GEnoov
+
+ BlockMIPSEQ
+ BlockMIPSNE
+ BlockMIPSLTZ
+ BlockMIPSLEZ
+ BlockMIPSGTZ
+ BlockMIPSGEZ
+ BlockMIPSFPT
+ BlockMIPSFPF
+
+ BlockMIPS64EQ
+ BlockMIPS64NE
+ BlockMIPS64LTZ
+ BlockMIPS64LEZ
+ BlockMIPS64GTZ
+ BlockMIPS64GEZ
+ BlockMIPS64FPT
+ BlockMIPS64FPF
+
+ BlockPPC64EQ
+ BlockPPC64NE
+ BlockPPC64LT
+ BlockPPC64LE
+ BlockPPC64GT
+ BlockPPC64GE
+ BlockPPC64FLT
+ BlockPPC64FLE
+ BlockPPC64FGT
+ BlockPPC64FGE
+
+ BlockRISCV64BEQ
+ BlockRISCV64BNE
+ BlockRISCV64BLT
+ BlockRISCV64BGE
+ BlockRISCV64BLTU
+ BlockRISCV64BGEU
+ BlockRISCV64BEQZ
+ BlockRISCV64BNEZ
+ BlockRISCV64BLEZ
+ BlockRISCV64BGEZ
+ BlockRISCV64BLTZ
+ BlockRISCV64BGTZ
+
+ BlockS390XBRC
+ BlockS390XCRJ
+ BlockS390XCGRJ
+ BlockS390XCLRJ
+ BlockS390XCLGRJ
+ BlockS390XCIJ
+ BlockS390XCGIJ
+ BlockS390XCLIJ
+ BlockS390XCLGIJ
+
+ BlockPlain
+ BlockIf
+ BlockDefer
+ BlockRet
+ BlockRetJmp
+ BlockExit
+ BlockFirst
+)
+
+var blockString = [...]string{
+ BlockInvalid: "BlockInvalid",
+
+ Block386EQ: "EQ",
+ Block386NE: "NE",
+ Block386LT: "LT",
+ Block386LE: "LE",
+ Block386GT: "GT",
+ Block386GE: "GE",
+ Block386OS: "OS",
+ Block386OC: "OC",
+ Block386ULT: "ULT",
+ Block386ULE: "ULE",
+ Block386UGT: "UGT",
+ Block386UGE: "UGE",
+ Block386EQF: "EQF",
+ Block386NEF: "NEF",
+ Block386ORD: "ORD",
+ Block386NAN: "NAN",
+
+ BlockAMD64EQ: "EQ",
+ BlockAMD64NE: "NE",
+ BlockAMD64LT: "LT",
+ BlockAMD64LE: "LE",
+ BlockAMD64GT: "GT",
+ BlockAMD64GE: "GE",
+ BlockAMD64OS: "OS",
+ BlockAMD64OC: "OC",
+ BlockAMD64ULT: "ULT",
+ BlockAMD64ULE: "ULE",
+ BlockAMD64UGT: "UGT",
+ BlockAMD64UGE: "UGE",
+ BlockAMD64EQF: "EQF",
+ BlockAMD64NEF: "NEF",
+ BlockAMD64ORD: "ORD",
+ BlockAMD64NAN: "NAN",
+
+ BlockARMEQ: "EQ",
+ BlockARMNE: "NE",
+ BlockARMLT: "LT",
+ BlockARMLE: "LE",
+ BlockARMGT: "GT",
+ BlockARMGE: "GE",
+ BlockARMULT: "ULT",
+ BlockARMULE: "ULE",
+ BlockARMUGT: "UGT",
+ BlockARMUGE: "UGE",
+ BlockARMLTnoov: "LTnoov",
+ BlockARMLEnoov: "LEnoov",
+ BlockARMGTnoov: "GTnoov",
+ BlockARMGEnoov: "GEnoov",
+
+ BlockARM64EQ: "EQ",
+ BlockARM64NE: "NE",
+ BlockARM64LT: "LT",
+ BlockARM64LE: "LE",
+ BlockARM64GT: "GT",
+ BlockARM64GE: "GE",
+ BlockARM64ULT: "ULT",
+ BlockARM64ULE: "ULE",
+ BlockARM64UGT: "UGT",
+ BlockARM64UGE: "UGE",
+ BlockARM64Z: "Z",
+ BlockARM64NZ: "NZ",
+ BlockARM64ZW: "ZW",
+ BlockARM64NZW: "NZW",
+ BlockARM64TBZ: "TBZ",
+ BlockARM64TBNZ: "TBNZ",
+ BlockARM64FLT: "FLT",
+ BlockARM64FLE: "FLE",
+ BlockARM64FGT: "FGT",
+ BlockARM64FGE: "FGE",
+ BlockARM64LTnoov: "LTnoov",
+ BlockARM64LEnoov: "LEnoov",
+ BlockARM64GTnoov: "GTnoov",
+ BlockARM64GEnoov: "GEnoov",
+
+ BlockMIPSEQ: "EQ",
+ BlockMIPSNE: "NE",
+ BlockMIPSLTZ: "LTZ",
+ BlockMIPSLEZ: "LEZ",
+ BlockMIPSGTZ: "GTZ",
+ BlockMIPSGEZ: "GEZ",
+ BlockMIPSFPT: "FPT",
+ BlockMIPSFPF: "FPF",
+
+ BlockMIPS64EQ: "EQ",
+ BlockMIPS64NE: "NE",
+ BlockMIPS64LTZ: "LTZ",
+ BlockMIPS64LEZ: "LEZ",
+ BlockMIPS64GTZ: "GTZ",
+ BlockMIPS64GEZ: "GEZ",
+ BlockMIPS64FPT: "FPT",
+ BlockMIPS64FPF: "FPF",
+
+ BlockPPC64EQ: "EQ",
+ BlockPPC64NE: "NE",
+ BlockPPC64LT: "LT",
+ BlockPPC64LE: "LE",
+ BlockPPC64GT: "GT",
+ BlockPPC64GE: "GE",
+ BlockPPC64FLT: "FLT",
+ BlockPPC64FLE: "FLE",
+ BlockPPC64FGT: "FGT",
+ BlockPPC64FGE: "FGE",
+
+ BlockRISCV64BEQ: "BEQ",
+ BlockRISCV64BNE: "BNE",
+ BlockRISCV64BLT: "BLT",
+ BlockRISCV64BGE: "BGE",
+ BlockRISCV64BLTU: "BLTU",
+ BlockRISCV64BGEU: "BGEU",
+ BlockRISCV64BEQZ: "BEQZ",
+ BlockRISCV64BNEZ: "BNEZ",
+ BlockRISCV64BLEZ: "BLEZ",
+ BlockRISCV64BGEZ: "BGEZ",
+ BlockRISCV64BLTZ: "BLTZ",
+ BlockRISCV64BGTZ: "BGTZ",
+
+ BlockS390XBRC: "BRC",
+ BlockS390XCRJ: "CRJ",
+ BlockS390XCGRJ: "CGRJ",
+ BlockS390XCLRJ: "CLRJ",
+ BlockS390XCLGRJ: "CLGRJ",
+ BlockS390XCIJ: "CIJ",
+ BlockS390XCGIJ: "CGIJ",
+ BlockS390XCLIJ: "CLIJ",
+ BlockS390XCLGIJ: "CLGIJ",
+
+ BlockPlain: "Plain",
+ BlockIf: "If",
+ BlockDefer: "Defer",
+ BlockRet: "Ret",
+ BlockRetJmp: "RetJmp",
+ BlockExit: "Exit",
+ BlockFirst: "First",
+}
+
+func (k BlockKind) String() string { return blockString[k] }
+func (k BlockKind) AuxIntType() string {
+ switch k {
+ case BlockARM64TBZ:
+ return "int64"
+ case BlockARM64TBNZ:
+ return "int64"
+ case BlockS390XCIJ:
+ return "int8"
+ case BlockS390XCGIJ:
+ return "int8"
+ case BlockS390XCLIJ:
+ return "uint8"
+ case BlockS390XCLGIJ:
+ return "uint8"
+ }
+ return ""
+}
+
+const (
+ OpInvalid Op = iota
+
+ Op386ADDSS
+ Op386ADDSD
+ Op386SUBSS
+ Op386SUBSD
+ Op386MULSS
+ Op386MULSD
+ Op386DIVSS
+ Op386DIVSD
+ Op386MOVSSload
+ Op386MOVSDload
+ Op386MOVSSconst
+ Op386MOVSDconst
+ Op386MOVSSloadidx1
+ Op386MOVSSloadidx4
+ Op386MOVSDloadidx1
+ Op386MOVSDloadidx8
+ Op386MOVSSstore
+ Op386MOVSDstore
+ Op386MOVSSstoreidx1
+ Op386MOVSSstoreidx4
+ Op386MOVSDstoreidx1
+ Op386MOVSDstoreidx8
+ Op386ADDSSload
+ Op386ADDSDload
+ Op386SUBSSload
+ Op386SUBSDload
+ Op386MULSSload
+ Op386MULSDload
+ Op386DIVSSload
+ Op386DIVSDload
+ Op386ADDL
+ Op386ADDLconst
+ Op386ADDLcarry
+ Op386ADDLconstcarry
+ Op386ADCL
+ Op386ADCLconst
+ Op386SUBL
+ Op386SUBLconst
+ Op386SUBLcarry
+ Op386SUBLconstcarry
+ Op386SBBL
+ Op386SBBLconst
+ Op386MULL
+ Op386MULLconst
+ Op386MULLU
+ Op386HMULL
+ Op386HMULLU
+ Op386MULLQU
+ Op386AVGLU
+ Op386DIVL
+ Op386DIVW
+ Op386DIVLU
+ Op386DIVWU
+ Op386MODL
+ Op386MODW
+ Op386MODLU
+ Op386MODWU
+ Op386ANDL
+ Op386ANDLconst
+ Op386ORL
+ Op386ORLconst
+ Op386XORL
+ Op386XORLconst
+ Op386CMPL
+ Op386CMPW
+ Op386CMPB
+ Op386CMPLconst
+ Op386CMPWconst
+ Op386CMPBconst
+ Op386CMPLload
+ Op386CMPWload
+ Op386CMPBload
+ Op386CMPLconstload
+ Op386CMPWconstload
+ Op386CMPBconstload
+ Op386UCOMISS
+ Op386UCOMISD
+ Op386TESTL
+ Op386TESTW
+ Op386TESTB
+ Op386TESTLconst
+ Op386TESTWconst
+ Op386TESTBconst
+ Op386SHLL
+ Op386SHLLconst
+ Op386SHRL
+ Op386SHRW
+ Op386SHRB
+ Op386SHRLconst
+ Op386SHRWconst
+ Op386SHRBconst
+ Op386SARL
+ Op386SARW
+ Op386SARB
+ Op386SARLconst
+ Op386SARWconst
+ Op386SARBconst
+ Op386ROLLconst
+ Op386ROLWconst
+ Op386ROLBconst
+ Op386ADDLload
+ Op386SUBLload
+ Op386MULLload
+ Op386ANDLload
+ Op386ORLload
+ Op386XORLload
+ Op386ADDLloadidx4
+ Op386SUBLloadidx4
+ Op386MULLloadidx4
+ Op386ANDLloadidx4
+ Op386ORLloadidx4
+ Op386XORLloadidx4
+ Op386NEGL
+ Op386NOTL
+ Op386BSFL
+ Op386BSFW
+ Op386BSRL
+ Op386BSRW
+ Op386BSWAPL
+ Op386SQRTSD
+ Op386SBBLcarrymask
+ Op386SETEQ
+ Op386SETNE
+ Op386SETL
+ Op386SETLE
+ Op386SETG
+ Op386SETGE
+ Op386SETB
+ Op386SETBE
+ Op386SETA
+ Op386SETAE
+ Op386SETO
+ Op386SETEQF
+ Op386SETNEF
+ Op386SETORD
+ Op386SETNAN
+ Op386SETGF
+ Op386SETGEF
+ Op386MOVBLSX
+ Op386MOVBLZX
+ Op386MOVWLSX
+ Op386MOVWLZX
+ Op386MOVLconst
+ Op386CVTTSD2SL
+ Op386CVTTSS2SL
+ Op386CVTSL2SS
+ Op386CVTSL2SD
+ Op386CVTSD2SS
+ Op386CVTSS2SD
+ Op386PXOR
+ Op386LEAL
+ Op386LEAL1
+ Op386LEAL2
+ Op386LEAL4
+ Op386LEAL8
+ Op386MOVBload
+ Op386MOVBLSXload
+ Op386MOVWload
+ Op386MOVWLSXload
+ Op386MOVLload
+ Op386MOVBstore
+ Op386MOVWstore
+ Op386MOVLstore
+ Op386ADDLmodify
+ Op386SUBLmodify
+ Op386ANDLmodify
+ Op386ORLmodify
+ Op386XORLmodify
+ Op386ADDLmodifyidx4
+ Op386SUBLmodifyidx4
+ Op386ANDLmodifyidx4
+ Op386ORLmodifyidx4
+ Op386XORLmodifyidx4
+ Op386ADDLconstmodify
+ Op386ANDLconstmodify
+ Op386ORLconstmodify
+ Op386XORLconstmodify
+ Op386ADDLconstmodifyidx4
+ Op386ANDLconstmodifyidx4
+ Op386ORLconstmodifyidx4
+ Op386XORLconstmodifyidx4
+ Op386MOVBloadidx1
+ Op386MOVWloadidx1
+ Op386MOVWloadidx2
+ Op386MOVLloadidx1
+ Op386MOVLloadidx4
+ Op386MOVBstoreidx1
+ Op386MOVWstoreidx1
+ Op386MOVWstoreidx2
+ Op386MOVLstoreidx1
+ Op386MOVLstoreidx4
+ Op386MOVBstoreconst
+ Op386MOVWstoreconst
+ Op386MOVLstoreconst
+ Op386MOVBstoreconstidx1
+ Op386MOVWstoreconstidx1
+ Op386MOVWstoreconstidx2
+ Op386MOVLstoreconstidx1
+ Op386MOVLstoreconstidx4
+ Op386DUFFZERO
+ Op386REPSTOSL
+ Op386CALLstatic
+ Op386CALLclosure
+ Op386CALLinter
+ Op386DUFFCOPY
+ Op386REPMOVSL
+ Op386InvertFlags
+ Op386LoweredGetG
+ Op386LoweredGetClosurePtr
+ Op386LoweredGetCallerPC
+ Op386LoweredGetCallerSP
+ Op386LoweredNilCheck
+ Op386LoweredWB
+ Op386LoweredPanicBoundsA
+ Op386LoweredPanicBoundsB
+ Op386LoweredPanicBoundsC
+ Op386LoweredPanicExtendA
+ Op386LoweredPanicExtendB
+ Op386LoweredPanicExtendC
+ Op386FlagEQ
+ Op386FlagLT_ULT
+ Op386FlagLT_UGT
+ Op386FlagGT_UGT
+ Op386FlagGT_ULT
+ Op386MOVSSconst1
+ Op386MOVSDconst1
+ Op386MOVSSconst2
+ Op386MOVSDconst2
+
+ OpAMD64ADDSS
+ OpAMD64ADDSD
+ OpAMD64SUBSS
+ OpAMD64SUBSD
+ OpAMD64MULSS
+ OpAMD64MULSD
+ OpAMD64DIVSS
+ OpAMD64DIVSD
+ OpAMD64MOVSSload
+ OpAMD64MOVSDload
+ OpAMD64MOVSSconst
+ OpAMD64MOVSDconst
+ OpAMD64MOVSSloadidx1
+ OpAMD64MOVSSloadidx4
+ OpAMD64MOVSDloadidx1
+ OpAMD64MOVSDloadidx8
+ OpAMD64MOVSSstore
+ OpAMD64MOVSDstore
+ OpAMD64MOVSSstoreidx1
+ OpAMD64MOVSSstoreidx4
+ OpAMD64MOVSDstoreidx1
+ OpAMD64MOVSDstoreidx8
+ OpAMD64ADDSSload
+ OpAMD64ADDSDload
+ OpAMD64SUBSSload
+ OpAMD64SUBSDload
+ OpAMD64MULSSload
+ OpAMD64MULSDload
+ OpAMD64DIVSSload
+ OpAMD64DIVSDload
+ OpAMD64ADDSSloadidx1
+ OpAMD64ADDSSloadidx4
+ OpAMD64ADDSDloadidx1
+ OpAMD64ADDSDloadidx8
+ OpAMD64SUBSSloadidx1
+ OpAMD64SUBSSloadidx4
+ OpAMD64SUBSDloadidx1
+ OpAMD64SUBSDloadidx8
+ OpAMD64MULSSloadidx1
+ OpAMD64MULSSloadidx4
+ OpAMD64MULSDloadidx1
+ OpAMD64MULSDloadidx8
+ OpAMD64DIVSSloadidx1
+ OpAMD64DIVSSloadidx4
+ OpAMD64DIVSDloadidx1
+ OpAMD64DIVSDloadidx8
+ OpAMD64ADDQ
+ OpAMD64ADDL
+ OpAMD64ADDQconst
+ OpAMD64ADDLconst
+ OpAMD64ADDQconstmodify
+ OpAMD64ADDLconstmodify
+ OpAMD64SUBQ
+ OpAMD64SUBL
+ OpAMD64SUBQconst
+ OpAMD64SUBLconst
+ OpAMD64MULQ
+ OpAMD64MULL
+ OpAMD64MULQconst
+ OpAMD64MULLconst
+ OpAMD64MULLU
+ OpAMD64MULQU
+ OpAMD64HMULQ
+ OpAMD64HMULL
+ OpAMD64HMULQU
+ OpAMD64HMULLU
+ OpAMD64AVGQU
+ OpAMD64DIVQ
+ OpAMD64DIVL
+ OpAMD64DIVW
+ OpAMD64DIVQU
+ OpAMD64DIVLU
+ OpAMD64DIVWU
+ OpAMD64NEGLflags
+ OpAMD64ADDQcarry
+ OpAMD64ADCQ
+ OpAMD64ADDQconstcarry
+ OpAMD64ADCQconst
+ OpAMD64SUBQborrow
+ OpAMD64SBBQ
+ OpAMD64SUBQconstborrow
+ OpAMD64SBBQconst
+ OpAMD64MULQU2
+ OpAMD64DIVQU2
+ OpAMD64ANDQ
+ OpAMD64ANDL
+ OpAMD64ANDQconst
+ OpAMD64ANDLconst
+ OpAMD64ANDQconstmodify
+ OpAMD64ANDLconstmodify
+ OpAMD64ORQ
+ OpAMD64ORL
+ OpAMD64ORQconst
+ OpAMD64ORLconst
+ OpAMD64ORQconstmodify
+ OpAMD64ORLconstmodify
+ OpAMD64XORQ
+ OpAMD64XORL
+ OpAMD64XORQconst
+ OpAMD64XORLconst
+ OpAMD64XORQconstmodify
+ OpAMD64XORLconstmodify
+ OpAMD64CMPQ
+ OpAMD64CMPL
+ OpAMD64CMPW
+ OpAMD64CMPB
+ OpAMD64CMPQconst
+ OpAMD64CMPLconst
+ OpAMD64CMPWconst
+ OpAMD64CMPBconst
+ OpAMD64CMPQload
+ OpAMD64CMPLload
+ OpAMD64CMPWload
+ OpAMD64CMPBload
+ OpAMD64CMPQconstload
+ OpAMD64CMPLconstload
+ OpAMD64CMPWconstload
+ OpAMD64CMPBconstload
+ OpAMD64CMPQloadidx8
+ OpAMD64CMPQloadidx1
+ OpAMD64CMPLloadidx4
+ OpAMD64CMPLloadidx1
+ OpAMD64CMPWloadidx2
+ OpAMD64CMPWloadidx1
+ OpAMD64CMPBloadidx1
+ OpAMD64CMPQconstloadidx8
+ OpAMD64CMPQconstloadidx1
+ OpAMD64CMPLconstloadidx4
+ OpAMD64CMPLconstloadidx1
+ OpAMD64CMPWconstloadidx2
+ OpAMD64CMPWconstloadidx1
+ OpAMD64CMPBconstloadidx1
+ OpAMD64UCOMISS
+ OpAMD64UCOMISD
+ OpAMD64BTL
+ OpAMD64BTQ
+ OpAMD64BTCL
+ OpAMD64BTCQ
+ OpAMD64BTRL
+ OpAMD64BTRQ
+ OpAMD64BTSL
+ OpAMD64BTSQ
+ OpAMD64BTLconst
+ OpAMD64BTQconst
+ OpAMD64BTCLconst
+ OpAMD64BTCQconst
+ OpAMD64BTRLconst
+ OpAMD64BTRQconst
+ OpAMD64BTSLconst
+ OpAMD64BTSQconst
+ OpAMD64BTCQmodify
+ OpAMD64BTCLmodify
+ OpAMD64BTSQmodify
+ OpAMD64BTSLmodify
+ OpAMD64BTRQmodify
+ OpAMD64BTRLmodify
+ OpAMD64BTCQconstmodify
+ OpAMD64BTCLconstmodify
+ OpAMD64BTSQconstmodify
+ OpAMD64BTSLconstmodify
+ OpAMD64BTRQconstmodify
+ OpAMD64BTRLconstmodify
+ OpAMD64TESTQ
+ OpAMD64TESTL
+ OpAMD64TESTW
+ OpAMD64TESTB
+ OpAMD64TESTQconst
+ OpAMD64TESTLconst
+ OpAMD64TESTWconst
+ OpAMD64TESTBconst
+ OpAMD64SHLQ
+ OpAMD64SHLL
+ OpAMD64SHLQconst
+ OpAMD64SHLLconst
+ OpAMD64SHRQ
+ OpAMD64SHRL
+ OpAMD64SHRW
+ OpAMD64SHRB
+ OpAMD64SHRQconst
+ OpAMD64SHRLconst
+ OpAMD64SHRWconst
+ OpAMD64SHRBconst
+ OpAMD64SARQ
+ OpAMD64SARL
+ OpAMD64SARW
+ OpAMD64SARB
+ OpAMD64SARQconst
+ OpAMD64SARLconst
+ OpAMD64SARWconst
+ OpAMD64SARBconst
+ OpAMD64ROLQ
+ OpAMD64ROLL
+ OpAMD64ROLW
+ OpAMD64ROLB
+ OpAMD64RORQ
+ OpAMD64RORL
+ OpAMD64RORW
+ OpAMD64RORB
+ OpAMD64ROLQconst
+ OpAMD64ROLLconst
+ OpAMD64ROLWconst
+ OpAMD64ROLBconst
+ OpAMD64ADDLload
+ OpAMD64ADDQload
+ OpAMD64SUBQload
+ OpAMD64SUBLload
+ OpAMD64ANDLload
+ OpAMD64ANDQload
+ OpAMD64ORQload
+ OpAMD64ORLload
+ OpAMD64XORQload
+ OpAMD64XORLload
+ OpAMD64ADDLloadidx1
+ OpAMD64ADDLloadidx4
+ OpAMD64ADDLloadidx8
+ OpAMD64ADDQloadidx1
+ OpAMD64ADDQloadidx8
+ OpAMD64SUBLloadidx1
+ OpAMD64SUBLloadidx4
+ OpAMD64SUBLloadidx8
+ OpAMD64SUBQloadidx1
+ OpAMD64SUBQloadidx8
+ OpAMD64ANDLloadidx1
+ OpAMD64ANDLloadidx4
+ OpAMD64ANDLloadidx8
+ OpAMD64ANDQloadidx1
+ OpAMD64ANDQloadidx8
+ OpAMD64ORLloadidx1
+ OpAMD64ORLloadidx4
+ OpAMD64ORLloadidx8
+ OpAMD64ORQloadidx1
+ OpAMD64ORQloadidx8
+ OpAMD64XORLloadidx1
+ OpAMD64XORLloadidx4
+ OpAMD64XORLloadidx8
+ OpAMD64XORQloadidx1
+ OpAMD64XORQloadidx8
+ OpAMD64ADDQmodify
+ OpAMD64SUBQmodify
+ OpAMD64ANDQmodify
+ OpAMD64ORQmodify
+ OpAMD64XORQmodify
+ OpAMD64ADDLmodify
+ OpAMD64SUBLmodify
+ OpAMD64ANDLmodify
+ OpAMD64ORLmodify
+ OpAMD64XORLmodify
+ OpAMD64ADDQmodifyidx1
+ OpAMD64ADDQmodifyidx8
+ OpAMD64SUBQmodifyidx1
+ OpAMD64SUBQmodifyidx8
+ OpAMD64ANDQmodifyidx1
+ OpAMD64ANDQmodifyidx8
+ OpAMD64ORQmodifyidx1
+ OpAMD64ORQmodifyidx8
+ OpAMD64XORQmodifyidx1
+ OpAMD64XORQmodifyidx8
+ OpAMD64ADDLmodifyidx1
+ OpAMD64ADDLmodifyidx4
+ OpAMD64ADDLmodifyidx8
+ OpAMD64SUBLmodifyidx1
+ OpAMD64SUBLmodifyidx4
+ OpAMD64SUBLmodifyidx8
+ OpAMD64ANDLmodifyidx1
+ OpAMD64ANDLmodifyidx4
+ OpAMD64ANDLmodifyidx8
+ OpAMD64ORLmodifyidx1
+ OpAMD64ORLmodifyidx4
+ OpAMD64ORLmodifyidx8
+ OpAMD64XORLmodifyidx1
+ OpAMD64XORLmodifyidx4
+ OpAMD64XORLmodifyidx8
+ OpAMD64ADDQconstmodifyidx1
+ OpAMD64ADDQconstmodifyidx8
+ OpAMD64ANDQconstmodifyidx1
+ OpAMD64ANDQconstmodifyidx8
+ OpAMD64ORQconstmodifyidx1
+ OpAMD64ORQconstmodifyidx8
+ OpAMD64XORQconstmodifyidx1
+ OpAMD64XORQconstmodifyidx8
+ OpAMD64ADDLconstmodifyidx1
+ OpAMD64ADDLconstmodifyidx4
+ OpAMD64ADDLconstmodifyidx8
+ OpAMD64ANDLconstmodifyidx1
+ OpAMD64ANDLconstmodifyidx4
+ OpAMD64ANDLconstmodifyidx8
+ OpAMD64ORLconstmodifyidx1
+ OpAMD64ORLconstmodifyidx4
+ OpAMD64ORLconstmodifyidx8
+ OpAMD64XORLconstmodifyidx1
+ OpAMD64XORLconstmodifyidx4
+ OpAMD64XORLconstmodifyidx8
+ OpAMD64NEGQ
+ OpAMD64NEGL
+ OpAMD64NOTQ
+ OpAMD64NOTL
+ OpAMD64BSFQ
+ OpAMD64BSFL
+ OpAMD64BSRQ
+ OpAMD64BSRL
+ OpAMD64CMOVQEQ
+ OpAMD64CMOVQNE
+ OpAMD64CMOVQLT
+ OpAMD64CMOVQGT
+ OpAMD64CMOVQLE
+ OpAMD64CMOVQGE
+ OpAMD64CMOVQLS
+ OpAMD64CMOVQHI
+ OpAMD64CMOVQCC
+ OpAMD64CMOVQCS
+ OpAMD64CMOVLEQ
+ OpAMD64CMOVLNE
+ OpAMD64CMOVLLT
+ OpAMD64CMOVLGT
+ OpAMD64CMOVLLE
+ OpAMD64CMOVLGE
+ OpAMD64CMOVLLS
+ OpAMD64CMOVLHI
+ OpAMD64CMOVLCC
+ OpAMD64CMOVLCS
+ OpAMD64CMOVWEQ
+ OpAMD64CMOVWNE
+ OpAMD64CMOVWLT
+ OpAMD64CMOVWGT
+ OpAMD64CMOVWLE
+ OpAMD64CMOVWGE
+ OpAMD64CMOVWLS
+ OpAMD64CMOVWHI
+ OpAMD64CMOVWCC
+ OpAMD64CMOVWCS
+ OpAMD64CMOVQEQF
+ OpAMD64CMOVQNEF
+ OpAMD64CMOVQGTF
+ OpAMD64CMOVQGEF
+ OpAMD64CMOVLEQF
+ OpAMD64CMOVLNEF
+ OpAMD64CMOVLGTF
+ OpAMD64CMOVLGEF
+ OpAMD64CMOVWEQF
+ OpAMD64CMOVWNEF
+ OpAMD64CMOVWGTF
+ OpAMD64CMOVWGEF
+ OpAMD64BSWAPQ
+ OpAMD64BSWAPL
+ OpAMD64POPCNTQ
+ OpAMD64POPCNTL
+ OpAMD64SQRTSD
+ OpAMD64ROUNDSD
+ OpAMD64VFMADD231SD
+ OpAMD64SBBQcarrymask
+ OpAMD64SBBLcarrymask
+ OpAMD64SETEQ
+ OpAMD64SETNE
+ OpAMD64SETL
+ OpAMD64SETLE
+ OpAMD64SETG
+ OpAMD64SETGE
+ OpAMD64SETB
+ OpAMD64SETBE
+ OpAMD64SETA
+ OpAMD64SETAE
+ OpAMD64SETO
+ OpAMD64SETEQstore
+ OpAMD64SETNEstore
+ OpAMD64SETLstore
+ OpAMD64SETLEstore
+ OpAMD64SETGstore
+ OpAMD64SETGEstore
+ OpAMD64SETBstore
+ OpAMD64SETBEstore
+ OpAMD64SETAstore
+ OpAMD64SETAEstore
+ OpAMD64SETEQF
+ OpAMD64SETNEF
+ OpAMD64SETORD
+ OpAMD64SETNAN
+ OpAMD64SETGF
+ OpAMD64SETGEF
+ OpAMD64MOVBQSX
+ OpAMD64MOVBQZX
+ OpAMD64MOVWQSX
+ OpAMD64MOVWQZX
+ OpAMD64MOVLQSX
+ OpAMD64MOVLQZX
+ OpAMD64MOVLconst
+ OpAMD64MOVQconst
+ OpAMD64CVTTSD2SL
+ OpAMD64CVTTSD2SQ
+ OpAMD64CVTTSS2SL
+ OpAMD64CVTTSS2SQ
+ OpAMD64CVTSL2SS
+ OpAMD64CVTSL2SD
+ OpAMD64CVTSQ2SS
+ OpAMD64CVTSQ2SD
+ OpAMD64CVTSD2SS
+ OpAMD64CVTSS2SD
+ OpAMD64MOVQi2f
+ OpAMD64MOVQf2i
+ OpAMD64MOVLi2f
+ OpAMD64MOVLf2i
+ OpAMD64PXOR
+ OpAMD64LEAQ
+ OpAMD64LEAL
+ OpAMD64LEAW
+ OpAMD64LEAQ1
+ OpAMD64LEAL1
+ OpAMD64LEAW1
+ OpAMD64LEAQ2
+ OpAMD64LEAL2
+ OpAMD64LEAW2
+ OpAMD64LEAQ4
+ OpAMD64LEAL4
+ OpAMD64LEAW4
+ OpAMD64LEAQ8
+ OpAMD64LEAL8
+ OpAMD64LEAW8
+ OpAMD64MOVBload
+ OpAMD64MOVBQSXload
+ OpAMD64MOVWload
+ OpAMD64MOVWQSXload
+ OpAMD64MOVLload
+ OpAMD64MOVLQSXload
+ OpAMD64MOVQload
+ OpAMD64MOVBstore
+ OpAMD64MOVWstore
+ OpAMD64MOVLstore
+ OpAMD64MOVQstore
+ OpAMD64MOVOload
+ OpAMD64MOVOstore
+ OpAMD64MOVBloadidx1
+ OpAMD64MOVWloadidx1
+ OpAMD64MOVWloadidx2
+ OpAMD64MOVLloadidx1
+ OpAMD64MOVLloadidx4
+ OpAMD64MOVLloadidx8
+ OpAMD64MOVQloadidx1
+ OpAMD64MOVQloadidx8
+ OpAMD64MOVBstoreidx1
+ OpAMD64MOVWstoreidx1
+ OpAMD64MOVWstoreidx2
+ OpAMD64MOVLstoreidx1
+ OpAMD64MOVLstoreidx4
+ OpAMD64MOVLstoreidx8
+ OpAMD64MOVQstoreidx1
+ OpAMD64MOVQstoreidx8
+ OpAMD64MOVBstoreconst
+ OpAMD64MOVWstoreconst
+ OpAMD64MOVLstoreconst
+ OpAMD64MOVQstoreconst
+ OpAMD64MOVBstoreconstidx1
+ OpAMD64MOVWstoreconstidx1
+ OpAMD64MOVWstoreconstidx2
+ OpAMD64MOVLstoreconstidx1
+ OpAMD64MOVLstoreconstidx4
+ OpAMD64MOVQstoreconstidx1
+ OpAMD64MOVQstoreconstidx8
+ OpAMD64DUFFZERO
+ OpAMD64MOVOconst
+ OpAMD64REPSTOSQ
+ OpAMD64CALLstatic
+ OpAMD64CALLclosure
+ OpAMD64CALLinter
+ OpAMD64DUFFCOPY
+ OpAMD64REPMOVSQ
+ OpAMD64InvertFlags
+ OpAMD64LoweredGetG
+ OpAMD64LoweredGetClosurePtr
+ OpAMD64LoweredGetCallerPC
+ OpAMD64LoweredGetCallerSP
+ OpAMD64LoweredNilCheck
+ OpAMD64LoweredWB
+ OpAMD64LoweredHasCPUFeature
+ OpAMD64LoweredPanicBoundsA
+ OpAMD64LoweredPanicBoundsB
+ OpAMD64LoweredPanicBoundsC
+ OpAMD64FlagEQ
+ OpAMD64FlagLT_ULT
+ OpAMD64FlagLT_UGT
+ OpAMD64FlagGT_UGT
+ OpAMD64FlagGT_ULT
+ OpAMD64MOVBatomicload
+ OpAMD64MOVLatomicload
+ OpAMD64MOVQatomicload
+ OpAMD64XCHGB
+ OpAMD64XCHGL
+ OpAMD64XCHGQ
+ OpAMD64XADDLlock
+ OpAMD64XADDQlock
+ OpAMD64AddTupleFirst32
+ OpAMD64AddTupleFirst64
+ OpAMD64CMPXCHGLlock
+ OpAMD64CMPXCHGQlock
+ OpAMD64ANDBlock
+ OpAMD64ANDLlock
+ OpAMD64ORBlock
+ OpAMD64ORLlock
+
+ OpARMADD
+ OpARMADDconst
+ OpARMSUB
+ OpARMSUBconst
+ OpARMRSB
+ OpARMRSBconst
+ OpARMMUL
+ OpARMHMUL
+ OpARMHMULU
+ OpARMCALLudiv
+ OpARMADDS
+ OpARMADDSconst
+ OpARMADC
+ OpARMADCconst
+ OpARMSUBS
+ OpARMSUBSconst
+ OpARMRSBSconst
+ OpARMSBC
+ OpARMSBCconst
+ OpARMRSCconst
+ OpARMMULLU
+ OpARMMULA
+ OpARMMULS
+ OpARMADDF
+ OpARMADDD
+ OpARMSUBF
+ OpARMSUBD
+ OpARMMULF
+ OpARMMULD
+ OpARMNMULF
+ OpARMNMULD
+ OpARMDIVF
+ OpARMDIVD
+ OpARMMULAF
+ OpARMMULAD
+ OpARMMULSF
+ OpARMMULSD
+ OpARMFMULAD
+ OpARMAND
+ OpARMANDconst
+ OpARMOR
+ OpARMORconst
+ OpARMXOR
+ OpARMXORconst
+ OpARMBIC
+ OpARMBICconst
+ OpARMBFX
+ OpARMBFXU
+ OpARMMVN
+ OpARMNEGF
+ OpARMNEGD
+ OpARMSQRTD
+ OpARMABSD
+ OpARMCLZ
+ OpARMREV
+ OpARMREV16
+ OpARMRBIT
+ OpARMSLL
+ OpARMSLLconst
+ OpARMSRL
+ OpARMSRLconst
+ OpARMSRA
+ OpARMSRAconst
+ OpARMSRR
+ OpARMSRRconst
+ OpARMADDshiftLL
+ OpARMADDshiftRL
+ OpARMADDshiftRA
+ OpARMSUBshiftLL
+ OpARMSUBshiftRL
+ OpARMSUBshiftRA
+ OpARMRSBshiftLL
+ OpARMRSBshiftRL
+ OpARMRSBshiftRA
+ OpARMANDshiftLL
+ OpARMANDshiftRL
+ OpARMANDshiftRA
+ OpARMORshiftLL
+ OpARMORshiftRL
+ OpARMORshiftRA
+ OpARMXORshiftLL
+ OpARMXORshiftRL
+ OpARMXORshiftRA
+ OpARMXORshiftRR
+ OpARMBICshiftLL
+ OpARMBICshiftRL
+ OpARMBICshiftRA
+ OpARMMVNshiftLL
+ OpARMMVNshiftRL
+ OpARMMVNshiftRA
+ OpARMADCshiftLL
+ OpARMADCshiftRL
+ OpARMADCshiftRA
+ OpARMSBCshiftLL
+ OpARMSBCshiftRL
+ OpARMSBCshiftRA
+ OpARMRSCshiftLL
+ OpARMRSCshiftRL
+ OpARMRSCshiftRA
+ OpARMADDSshiftLL
+ OpARMADDSshiftRL
+ OpARMADDSshiftRA
+ OpARMSUBSshiftLL
+ OpARMSUBSshiftRL
+ OpARMSUBSshiftRA
+ OpARMRSBSshiftLL
+ OpARMRSBSshiftRL
+ OpARMRSBSshiftRA
+ OpARMADDshiftLLreg
+ OpARMADDshiftRLreg
+ OpARMADDshiftRAreg
+ OpARMSUBshiftLLreg
+ OpARMSUBshiftRLreg
+ OpARMSUBshiftRAreg
+ OpARMRSBshiftLLreg
+ OpARMRSBshiftRLreg
+ OpARMRSBshiftRAreg
+ OpARMANDshiftLLreg
+ OpARMANDshiftRLreg
+ OpARMANDshiftRAreg
+ OpARMORshiftLLreg
+ OpARMORshiftRLreg
+ OpARMORshiftRAreg
+ OpARMXORshiftLLreg
+ OpARMXORshiftRLreg
+ OpARMXORshiftRAreg
+ OpARMBICshiftLLreg
+ OpARMBICshiftRLreg
+ OpARMBICshiftRAreg
+ OpARMMVNshiftLLreg
+ OpARMMVNshiftRLreg
+ OpARMMVNshiftRAreg
+ OpARMADCshiftLLreg
+ OpARMADCshiftRLreg
+ OpARMADCshiftRAreg
+ OpARMSBCshiftLLreg
+ OpARMSBCshiftRLreg
+ OpARMSBCshiftRAreg
+ OpARMRSCshiftLLreg
+ OpARMRSCshiftRLreg
+ OpARMRSCshiftRAreg
+ OpARMADDSshiftLLreg
+ OpARMADDSshiftRLreg
+ OpARMADDSshiftRAreg
+ OpARMSUBSshiftLLreg
+ OpARMSUBSshiftRLreg
+ OpARMSUBSshiftRAreg
+ OpARMRSBSshiftLLreg
+ OpARMRSBSshiftRLreg
+ OpARMRSBSshiftRAreg
+ OpARMCMP
+ OpARMCMPconst
+ OpARMCMN
+ OpARMCMNconst
+ OpARMTST
+ OpARMTSTconst
+ OpARMTEQ
+ OpARMTEQconst
+ OpARMCMPF
+ OpARMCMPD
+ OpARMCMPshiftLL
+ OpARMCMPshiftRL
+ OpARMCMPshiftRA
+ OpARMCMNshiftLL
+ OpARMCMNshiftRL
+ OpARMCMNshiftRA
+ OpARMTSTshiftLL
+ OpARMTSTshiftRL
+ OpARMTSTshiftRA
+ OpARMTEQshiftLL
+ OpARMTEQshiftRL
+ OpARMTEQshiftRA
+ OpARMCMPshiftLLreg
+ OpARMCMPshiftRLreg
+ OpARMCMPshiftRAreg
+ OpARMCMNshiftLLreg
+ OpARMCMNshiftRLreg
+ OpARMCMNshiftRAreg
+ OpARMTSTshiftLLreg
+ OpARMTSTshiftRLreg
+ OpARMTSTshiftRAreg
+ OpARMTEQshiftLLreg
+ OpARMTEQshiftRLreg
+ OpARMTEQshiftRAreg
+ OpARMCMPF0
+ OpARMCMPD0
+ OpARMMOVWconst
+ OpARMMOVFconst
+ OpARMMOVDconst
+ OpARMMOVWaddr
+ OpARMMOVBload
+ OpARMMOVBUload
+ OpARMMOVHload
+ OpARMMOVHUload
+ OpARMMOVWload
+ OpARMMOVFload
+ OpARMMOVDload
+ OpARMMOVBstore
+ OpARMMOVHstore
+ OpARMMOVWstore
+ OpARMMOVFstore
+ OpARMMOVDstore
+ OpARMMOVWloadidx
+ OpARMMOVWloadshiftLL
+ OpARMMOVWloadshiftRL
+ OpARMMOVWloadshiftRA
+ OpARMMOVBUloadidx
+ OpARMMOVBloadidx
+ OpARMMOVHUloadidx
+ OpARMMOVHloadidx
+ OpARMMOVWstoreidx
+ OpARMMOVWstoreshiftLL
+ OpARMMOVWstoreshiftRL
+ OpARMMOVWstoreshiftRA
+ OpARMMOVBstoreidx
+ OpARMMOVHstoreidx
+ OpARMMOVBreg
+ OpARMMOVBUreg
+ OpARMMOVHreg
+ OpARMMOVHUreg
+ OpARMMOVWreg
+ OpARMMOVWnop
+ OpARMMOVWF
+ OpARMMOVWD
+ OpARMMOVWUF
+ OpARMMOVWUD
+ OpARMMOVFW
+ OpARMMOVDW
+ OpARMMOVFWU
+ OpARMMOVDWU
+ OpARMMOVFD
+ OpARMMOVDF
+ OpARMCMOVWHSconst
+ OpARMCMOVWLSconst
+ OpARMSRAcond
+ OpARMCALLstatic
+ OpARMCALLclosure
+ OpARMCALLinter
+ OpARMLoweredNilCheck
+ OpARMEqual
+ OpARMNotEqual
+ OpARMLessThan
+ OpARMLessEqual
+ OpARMGreaterThan
+ OpARMGreaterEqual
+ OpARMLessThanU
+ OpARMLessEqualU
+ OpARMGreaterThanU
+ OpARMGreaterEqualU
+ OpARMDUFFZERO
+ OpARMDUFFCOPY
+ OpARMLoweredZero
+ OpARMLoweredMove
+ OpARMLoweredGetClosurePtr
+ OpARMLoweredGetCallerSP
+ OpARMLoweredGetCallerPC
+ OpARMLoweredPanicBoundsA
+ OpARMLoweredPanicBoundsB
+ OpARMLoweredPanicBoundsC
+ OpARMLoweredPanicExtendA
+ OpARMLoweredPanicExtendB
+ OpARMLoweredPanicExtendC
+ OpARMFlagConstant
+ OpARMInvertFlags
+ OpARMLoweredWB
+
+ OpARM64ADCSflags
+ OpARM64ADCzerocarry
+ OpARM64ADD
+ OpARM64ADDconst
+ OpARM64ADDSconstflags
+ OpARM64ADDSflags
+ OpARM64SUB
+ OpARM64SUBconst
+ OpARM64SBCSflags
+ OpARM64SUBSflags
+ OpARM64MUL
+ OpARM64MULW
+ OpARM64MNEG
+ OpARM64MNEGW
+ OpARM64MULH
+ OpARM64UMULH
+ OpARM64MULL
+ OpARM64UMULL
+ OpARM64DIV
+ OpARM64UDIV
+ OpARM64DIVW
+ OpARM64UDIVW
+ OpARM64MOD
+ OpARM64UMOD
+ OpARM64MODW
+ OpARM64UMODW
+ OpARM64FADDS
+ OpARM64FADDD
+ OpARM64FSUBS
+ OpARM64FSUBD
+ OpARM64FMULS
+ OpARM64FMULD
+ OpARM64FNMULS
+ OpARM64FNMULD
+ OpARM64FDIVS
+ OpARM64FDIVD
+ OpARM64AND
+ OpARM64ANDconst
+ OpARM64OR
+ OpARM64ORconst
+ OpARM64XOR
+ OpARM64XORconst
+ OpARM64BIC
+ OpARM64EON
+ OpARM64ORN
+ OpARM64LoweredMuluhilo
+ OpARM64MVN
+ OpARM64NEG
+ OpARM64NEGSflags
+ OpARM64NGCzerocarry
+ OpARM64FABSD
+ OpARM64FNEGS
+ OpARM64FNEGD
+ OpARM64FSQRTD
+ OpARM64REV
+ OpARM64REVW
+ OpARM64REV16W
+ OpARM64RBIT
+ OpARM64RBITW
+ OpARM64CLZ
+ OpARM64CLZW
+ OpARM64VCNT
+ OpARM64VUADDLV
+ OpARM64LoweredRound32F
+ OpARM64LoweredRound64F
+ OpARM64FMADDS
+ OpARM64FMADDD
+ OpARM64FNMADDS
+ OpARM64FNMADDD
+ OpARM64FMSUBS
+ OpARM64FMSUBD
+ OpARM64FNMSUBS
+ OpARM64FNMSUBD
+ OpARM64MADD
+ OpARM64MADDW
+ OpARM64MSUB
+ OpARM64MSUBW
+ OpARM64SLL
+ OpARM64SLLconst
+ OpARM64SRL
+ OpARM64SRLconst
+ OpARM64SRA
+ OpARM64SRAconst
+ OpARM64ROR
+ OpARM64RORW
+ OpARM64RORconst
+ OpARM64RORWconst
+ OpARM64EXTRconst
+ OpARM64EXTRWconst
+ OpARM64CMP
+ OpARM64CMPconst
+ OpARM64CMPW
+ OpARM64CMPWconst
+ OpARM64CMN
+ OpARM64CMNconst
+ OpARM64CMNW
+ OpARM64CMNWconst
+ OpARM64TST
+ OpARM64TSTconst
+ OpARM64TSTW
+ OpARM64TSTWconst
+ OpARM64FCMPS
+ OpARM64FCMPD
+ OpARM64FCMPS0
+ OpARM64FCMPD0
+ OpARM64MVNshiftLL
+ OpARM64MVNshiftRL
+ OpARM64MVNshiftRA
+ OpARM64NEGshiftLL
+ OpARM64NEGshiftRL
+ OpARM64NEGshiftRA
+ OpARM64ADDshiftLL
+ OpARM64ADDshiftRL
+ OpARM64ADDshiftRA
+ OpARM64SUBshiftLL
+ OpARM64SUBshiftRL
+ OpARM64SUBshiftRA
+ OpARM64ANDshiftLL
+ OpARM64ANDshiftRL
+ OpARM64ANDshiftRA
+ OpARM64ORshiftLL
+ OpARM64ORshiftRL
+ OpARM64ORshiftRA
+ OpARM64XORshiftLL
+ OpARM64XORshiftRL
+ OpARM64XORshiftRA
+ OpARM64BICshiftLL
+ OpARM64BICshiftRL
+ OpARM64BICshiftRA
+ OpARM64EONshiftLL
+ OpARM64EONshiftRL
+ OpARM64EONshiftRA
+ OpARM64ORNshiftLL
+ OpARM64ORNshiftRL
+ OpARM64ORNshiftRA
+ OpARM64CMPshiftLL
+ OpARM64CMPshiftRL
+ OpARM64CMPshiftRA
+ OpARM64CMNshiftLL
+ OpARM64CMNshiftRL
+ OpARM64CMNshiftRA
+ OpARM64TSTshiftLL
+ OpARM64TSTshiftRL
+ OpARM64TSTshiftRA
+ OpARM64BFI
+ OpARM64BFXIL
+ OpARM64SBFIZ
+ OpARM64SBFX
+ OpARM64UBFIZ
+ OpARM64UBFX
+ OpARM64MOVDconst
+ OpARM64FMOVSconst
+ OpARM64FMOVDconst
+ OpARM64MOVDaddr
+ OpARM64MOVBload
+ OpARM64MOVBUload
+ OpARM64MOVHload
+ OpARM64MOVHUload
+ OpARM64MOVWload
+ OpARM64MOVWUload
+ OpARM64MOVDload
+ OpARM64FMOVSload
+ OpARM64FMOVDload
+ OpARM64MOVDloadidx
+ OpARM64MOVWloadidx
+ OpARM64MOVWUloadidx
+ OpARM64MOVHloadidx
+ OpARM64MOVHUloadidx
+ OpARM64MOVBloadidx
+ OpARM64MOVBUloadidx
+ OpARM64FMOVSloadidx
+ OpARM64FMOVDloadidx
+ OpARM64MOVHloadidx2
+ OpARM64MOVHUloadidx2
+ OpARM64MOVWloadidx4
+ OpARM64MOVWUloadidx4
+ OpARM64MOVDloadidx8
+ OpARM64MOVBstore
+ OpARM64MOVHstore
+ OpARM64MOVWstore
+ OpARM64MOVDstore
+ OpARM64STP
+ OpARM64FMOVSstore
+ OpARM64FMOVDstore
+ OpARM64MOVBstoreidx
+ OpARM64MOVHstoreidx
+ OpARM64MOVWstoreidx
+ OpARM64MOVDstoreidx
+ OpARM64FMOVSstoreidx
+ OpARM64FMOVDstoreidx
+ OpARM64MOVHstoreidx2
+ OpARM64MOVWstoreidx4
+ OpARM64MOVDstoreidx8
+ OpARM64MOVBstorezero
+ OpARM64MOVHstorezero
+ OpARM64MOVWstorezero
+ OpARM64MOVDstorezero
+ OpARM64MOVQstorezero
+ OpARM64MOVBstorezeroidx
+ OpARM64MOVHstorezeroidx
+ OpARM64MOVWstorezeroidx
+ OpARM64MOVDstorezeroidx
+ OpARM64MOVHstorezeroidx2
+ OpARM64MOVWstorezeroidx4
+ OpARM64MOVDstorezeroidx8
+ OpARM64FMOVDgpfp
+ OpARM64FMOVDfpgp
+ OpARM64FMOVSgpfp
+ OpARM64FMOVSfpgp
+ OpARM64MOVBreg
+ OpARM64MOVBUreg
+ OpARM64MOVHreg
+ OpARM64MOVHUreg
+ OpARM64MOVWreg
+ OpARM64MOVWUreg
+ OpARM64MOVDreg
+ OpARM64MOVDnop
+ OpARM64SCVTFWS
+ OpARM64SCVTFWD
+ OpARM64UCVTFWS
+ OpARM64UCVTFWD
+ OpARM64SCVTFS
+ OpARM64SCVTFD
+ OpARM64UCVTFS
+ OpARM64UCVTFD
+ OpARM64FCVTZSSW
+ OpARM64FCVTZSDW
+ OpARM64FCVTZUSW
+ OpARM64FCVTZUDW
+ OpARM64FCVTZSS
+ OpARM64FCVTZSD
+ OpARM64FCVTZUS
+ OpARM64FCVTZUD
+ OpARM64FCVTSD
+ OpARM64FCVTDS
+ OpARM64FRINTAD
+ OpARM64FRINTMD
+ OpARM64FRINTND
+ OpARM64FRINTPD
+ OpARM64FRINTZD
+ OpARM64CSEL
+ OpARM64CSEL0
+ OpARM64CALLstatic
+ OpARM64CALLclosure
+ OpARM64CALLinter
+ OpARM64LoweredNilCheck
+ OpARM64Equal
+ OpARM64NotEqual
+ OpARM64LessThan
+ OpARM64LessEqual
+ OpARM64GreaterThan
+ OpARM64GreaterEqual
+ OpARM64LessThanU
+ OpARM64LessEqualU
+ OpARM64GreaterThanU
+ OpARM64GreaterEqualU
+ OpARM64LessThanF
+ OpARM64LessEqualF
+ OpARM64GreaterThanF
+ OpARM64GreaterEqualF
+ OpARM64NotLessThanF
+ OpARM64NotLessEqualF
+ OpARM64NotGreaterThanF
+ OpARM64NotGreaterEqualF
+ OpARM64DUFFZERO
+ OpARM64LoweredZero
+ OpARM64DUFFCOPY
+ OpARM64LoweredMove
+ OpARM64LoweredGetClosurePtr
+ OpARM64LoweredGetCallerSP
+ OpARM64LoweredGetCallerPC
+ OpARM64FlagConstant
+ OpARM64InvertFlags
+ OpARM64LDAR
+ OpARM64LDARB
+ OpARM64LDARW
+ OpARM64STLRB
+ OpARM64STLR
+ OpARM64STLRW
+ OpARM64LoweredAtomicExchange64
+ OpARM64LoweredAtomicExchange32
+ OpARM64LoweredAtomicExchange64Variant
+ OpARM64LoweredAtomicExchange32Variant
+ OpARM64LoweredAtomicAdd64
+ OpARM64LoweredAtomicAdd32
+ OpARM64LoweredAtomicAdd64Variant
+ OpARM64LoweredAtomicAdd32Variant
+ OpARM64LoweredAtomicCas64
+ OpARM64LoweredAtomicCas32
+ OpARM64LoweredAtomicCas64Variant
+ OpARM64LoweredAtomicCas32Variant
+ OpARM64LoweredAtomicAnd8
+ OpARM64LoweredAtomicAnd32
+ OpARM64LoweredAtomicOr8
+ OpARM64LoweredAtomicOr32
+ OpARM64LoweredAtomicAnd8Variant
+ OpARM64LoweredAtomicAnd32Variant
+ OpARM64LoweredAtomicOr8Variant
+ OpARM64LoweredAtomicOr32Variant
+ OpARM64LoweredWB
+ OpARM64LoweredPanicBoundsA
+ OpARM64LoweredPanicBoundsB
+ OpARM64LoweredPanicBoundsC
+
+ OpMIPSADD
+ OpMIPSADDconst
+ OpMIPSSUB
+ OpMIPSSUBconst
+ OpMIPSMUL
+ OpMIPSMULT
+ OpMIPSMULTU
+ OpMIPSDIV
+ OpMIPSDIVU
+ OpMIPSADDF
+ OpMIPSADDD
+ OpMIPSSUBF
+ OpMIPSSUBD
+ OpMIPSMULF
+ OpMIPSMULD
+ OpMIPSDIVF
+ OpMIPSDIVD
+ OpMIPSAND
+ OpMIPSANDconst
+ OpMIPSOR
+ OpMIPSORconst
+ OpMIPSXOR
+ OpMIPSXORconst
+ OpMIPSNOR
+ OpMIPSNORconst
+ OpMIPSNEG
+ OpMIPSNEGF
+ OpMIPSNEGD
+ OpMIPSSQRTD
+ OpMIPSSLL
+ OpMIPSSLLconst
+ OpMIPSSRL
+ OpMIPSSRLconst
+ OpMIPSSRA
+ OpMIPSSRAconst
+ OpMIPSCLZ
+ OpMIPSSGT
+ OpMIPSSGTconst
+ OpMIPSSGTzero
+ OpMIPSSGTU
+ OpMIPSSGTUconst
+ OpMIPSSGTUzero
+ OpMIPSCMPEQF
+ OpMIPSCMPEQD
+ OpMIPSCMPGEF
+ OpMIPSCMPGED
+ OpMIPSCMPGTF
+ OpMIPSCMPGTD
+ OpMIPSMOVWconst
+ OpMIPSMOVFconst
+ OpMIPSMOVDconst
+ OpMIPSMOVWaddr
+ OpMIPSMOVBload
+ OpMIPSMOVBUload
+ OpMIPSMOVHload
+ OpMIPSMOVHUload
+ OpMIPSMOVWload
+ OpMIPSMOVFload
+ OpMIPSMOVDload
+ OpMIPSMOVBstore
+ OpMIPSMOVHstore
+ OpMIPSMOVWstore
+ OpMIPSMOVFstore
+ OpMIPSMOVDstore
+ OpMIPSMOVBstorezero
+ OpMIPSMOVHstorezero
+ OpMIPSMOVWstorezero
+ OpMIPSMOVBreg
+ OpMIPSMOVBUreg
+ OpMIPSMOVHreg
+ OpMIPSMOVHUreg
+ OpMIPSMOVWreg
+ OpMIPSMOVWnop
+ OpMIPSCMOVZ
+ OpMIPSCMOVZzero
+ OpMIPSMOVWF
+ OpMIPSMOVWD
+ OpMIPSTRUNCFW
+ OpMIPSTRUNCDW
+ OpMIPSMOVFD
+ OpMIPSMOVDF
+ OpMIPSCALLstatic
+ OpMIPSCALLclosure
+ OpMIPSCALLinter
+ OpMIPSLoweredAtomicLoad8
+ OpMIPSLoweredAtomicLoad32
+ OpMIPSLoweredAtomicStore8
+ OpMIPSLoweredAtomicStore32
+ OpMIPSLoweredAtomicStorezero
+ OpMIPSLoweredAtomicExchange
+ OpMIPSLoweredAtomicAdd
+ OpMIPSLoweredAtomicAddconst
+ OpMIPSLoweredAtomicCas
+ OpMIPSLoweredAtomicAnd
+ OpMIPSLoweredAtomicOr
+ OpMIPSLoweredZero
+ OpMIPSLoweredMove
+ OpMIPSLoweredNilCheck
+ OpMIPSFPFlagTrue
+ OpMIPSFPFlagFalse
+ OpMIPSLoweredGetClosurePtr
+ OpMIPSLoweredGetCallerSP
+ OpMIPSLoweredGetCallerPC
+ OpMIPSLoweredWB
+ OpMIPSLoweredPanicBoundsA
+ OpMIPSLoweredPanicBoundsB
+ OpMIPSLoweredPanicBoundsC
+ OpMIPSLoweredPanicExtendA
+ OpMIPSLoweredPanicExtendB
+ OpMIPSLoweredPanicExtendC
+
+ OpMIPS64ADDV
+ OpMIPS64ADDVconst
+ OpMIPS64SUBV
+ OpMIPS64SUBVconst
+ OpMIPS64MULV
+ OpMIPS64MULVU
+ OpMIPS64DIVV
+ OpMIPS64DIVVU
+ OpMIPS64ADDF
+ OpMIPS64ADDD
+ OpMIPS64SUBF
+ OpMIPS64SUBD
+ OpMIPS64MULF
+ OpMIPS64MULD
+ OpMIPS64DIVF
+ OpMIPS64DIVD
+ OpMIPS64AND
+ OpMIPS64ANDconst
+ OpMIPS64OR
+ OpMIPS64ORconst
+ OpMIPS64XOR
+ OpMIPS64XORconst
+ OpMIPS64NOR
+ OpMIPS64NORconst
+ OpMIPS64NEGV
+ OpMIPS64NEGF
+ OpMIPS64NEGD
+ OpMIPS64SQRTD
+ OpMIPS64SLLV
+ OpMIPS64SLLVconst
+ OpMIPS64SRLV
+ OpMIPS64SRLVconst
+ OpMIPS64SRAV
+ OpMIPS64SRAVconst
+ OpMIPS64SGT
+ OpMIPS64SGTconst
+ OpMIPS64SGTU
+ OpMIPS64SGTUconst
+ OpMIPS64CMPEQF
+ OpMIPS64CMPEQD
+ OpMIPS64CMPGEF
+ OpMIPS64CMPGED
+ OpMIPS64CMPGTF
+ OpMIPS64CMPGTD
+ OpMIPS64MOVVconst
+ OpMIPS64MOVFconst
+ OpMIPS64MOVDconst
+ OpMIPS64MOVVaddr
+ OpMIPS64MOVBload
+ OpMIPS64MOVBUload
+ OpMIPS64MOVHload
+ OpMIPS64MOVHUload
+ OpMIPS64MOVWload
+ OpMIPS64MOVWUload
+ OpMIPS64MOVVload
+ OpMIPS64MOVFload
+ OpMIPS64MOVDload
+ OpMIPS64MOVBstore
+ OpMIPS64MOVHstore
+ OpMIPS64MOVWstore
+ OpMIPS64MOVVstore
+ OpMIPS64MOVFstore
+ OpMIPS64MOVDstore
+ OpMIPS64MOVBstorezero
+ OpMIPS64MOVHstorezero
+ OpMIPS64MOVWstorezero
+ OpMIPS64MOVVstorezero
+ OpMIPS64MOVBreg
+ OpMIPS64MOVBUreg
+ OpMIPS64MOVHreg
+ OpMIPS64MOVHUreg
+ OpMIPS64MOVWreg
+ OpMIPS64MOVWUreg
+ OpMIPS64MOVVreg
+ OpMIPS64MOVVnop
+ OpMIPS64MOVWF
+ OpMIPS64MOVWD
+ OpMIPS64MOVVF
+ OpMIPS64MOVVD
+ OpMIPS64TRUNCFW
+ OpMIPS64TRUNCDW
+ OpMIPS64TRUNCFV
+ OpMIPS64TRUNCDV
+ OpMIPS64MOVFD
+ OpMIPS64MOVDF
+ OpMIPS64CALLstatic
+ OpMIPS64CALLclosure
+ OpMIPS64CALLinter
+ OpMIPS64DUFFZERO
+ OpMIPS64DUFFCOPY
+ OpMIPS64LoweredZero
+ OpMIPS64LoweredMove
+ OpMIPS64LoweredAtomicLoad8
+ OpMIPS64LoweredAtomicLoad32
+ OpMIPS64LoweredAtomicLoad64
+ OpMIPS64LoweredAtomicStore8
+ OpMIPS64LoweredAtomicStore32
+ OpMIPS64LoweredAtomicStore64
+ OpMIPS64LoweredAtomicStorezero32
+ OpMIPS64LoweredAtomicStorezero64
+ OpMIPS64LoweredAtomicExchange32
+ OpMIPS64LoweredAtomicExchange64
+ OpMIPS64LoweredAtomicAdd32
+ OpMIPS64LoweredAtomicAdd64
+ OpMIPS64LoweredAtomicAddconst32
+ OpMIPS64LoweredAtomicAddconst64
+ OpMIPS64LoweredAtomicCas32
+ OpMIPS64LoweredAtomicCas64
+ OpMIPS64LoweredNilCheck
+ OpMIPS64FPFlagTrue
+ OpMIPS64FPFlagFalse
+ OpMIPS64LoweredGetClosurePtr
+ OpMIPS64LoweredGetCallerSP
+ OpMIPS64LoweredGetCallerPC
+ OpMIPS64LoweredWB
+ OpMIPS64LoweredPanicBoundsA
+ OpMIPS64LoweredPanicBoundsB
+ OpMIPS64LoweredPanicBoundsC
+
+ OpPPC64ADD
+ OpPPC64ADDconst
+ OpPPC64FADD
+ OpPPC64FADDS
+ OpPPC64SUB
+ OpPPC64SUBFCconst
+ OpPPC64FSUB
+ OpPPC64FSUBS
+ OpPPC64MULLD
+ OpPPC64MULLW
+ OpPPC64MULLDconst
+ OpPPC64MULLWconst
+ OpPPC64MADDLD
+ OpPPC64MULHD
+ OpPPC64MULHW
+ OpPPC64MULHDU
+ OpPPC64MULHWU
+ OpPPC64LoweredMuluhilo
+ OpPPC64FMUL
+ OpPPC64FMULS
+ OpPPC64FMADD
+ OpPPC64FMADDS
+ OpPPC64FMSUB
+ OpPPC64FMSUBS
+ OpPPC64SRAD
+ OpPPC64SRAW
+ OpPPC64SRD
+ OpPPC64SRW
+ OpPPC64SLD
+ OpPPC64SLW
+ OpPPC64ROTL
+ OpPPC64ROTLW
+ OpPPC64RLDICL
+ OpPPC64CLRLSLWI
+ OpPPC64CLRLSLDI
+ OpPPC64LoweredAdd64Carry
+ OpPPC64SRADconst
+ OpPPC64SRAWconst
+ OpPPC64SRDconst
+ OpPPC64SRWconst
+ OpPPC64SLDconst
+ OpPPC64SLWconst
+ OpPPC64ROTLconst
+ OpPPC64ROTLWconst
+ OpPPC64EXTSWSLconst
+ OpPPC64RLWINM
+ OpPPC64RLWNM
+ OpPPC64RLWMI
+ OpPPC64CNTLZD
+ OpPPC64CNTLZW
+ OpPPC64CNTTZD
+ OpPPC64CNTTZW
+ OpPPC64POPCNTD
+ OpPPC64POPCNTW
+ OpPPC64POPCNTB
+ OpPPC64FDIV
+ OpPPC64FDIVS
+ OpPPC64DIVD
+ OpPPC64DIVW
+ OpPPC64DIVDU
+ OpPPC64DIVWU
+ OpPPC64MODUD
+ OpPPC64MODSD
+ OpPPC64MODUW
+ OpPPC64MODSW
+ OpPPC64FCTIDZ
+ OpPPC64FCTIWZ
+ OpPPC64FCFID
+ OpPPC64FCFIDS
+ OpPPC64FRSP
+ OpPPC64MFVSRD
+ OpPPC64MTVSRD
+ OpPPC64AND
+ OpPPC64ANDN
+ OpPPC64ANDCC
+ OpPPC64OR
+ OpPPC64ORN
+ OpPPC64ORCC
+ OpPPC64NOR
+ OpPPC64XOR
+ OpPPC64XORCC
+ OpPPC64EQV
+ OpPPC64NEG
+ OpPPC64FNEG
+ OpPPC64FSQRT
+ OpPPC64FSQRTS
+ OpPPC64FFLOOR
+ OpPPC64FCEIL
+ OpPPC64FTRUNC
+ OpPPC64FROUND
+ OpPPC64FABS
+ OpPPC64FNABS
+ OpPPC64FCPSGN
+ OpPPC64ORconst
+ OpPPC64XORconst
+ OpPPC64ANDconst
+ OpPPC64ANDCCconst
+ OpPPC64MOVBreg
+ OpPPC64MOVBZreg
+ OpPPC64MOVHreg
+ OpPPC64MOVHZreg
+ OpPPC64MOVWreg
+ OpPPC64MOVWZreg
+ OpPPC64MOVBZload
+ OpPPC64MOVHload
+ OpPPC64MOVHZload
+ OpPPC64MOVWload
+ OpPPC64MOVWZload
+ OpPPC64MOVDload
+ OpPPC64MOVDBRload
+ OpPPC64MOVWBRload
+ OpPPC64MOVHBRload
+ OpPPC64MOVBZloadidx
+ OpPPC64MOVHloadidx
+ OpPPC64MOVHZloadidx
+ OpPPC64MOVWloadidx
+ OpPPC64MOVWZloadidx
+ OpPPC64MOVDloadidx
+ OpPPC64MOVHBRloadidx
+ OpPPC64MOVWBRloadidx
+ OpPPC64MOVDBRloadidx
+ OpPPC64FMOVDloadidx
+ OpPPC64FMOVSloadidx
+ OpPPC64MOVDBRstore
+ OpPPC64MOVWBRstore
+ OpPPC64MOVHBRstore
+ OpPPC64FMOVDload
+ OpPPC64FMOVSload
+ OpPPC64MOVBstore
+ OpPPC64MOVHstore
+ OpPPC64MOVWstore
+ OpPPC64MOVDstore
+ OpPPC64FMOVDstore
+ OpPPC64FMOVSstore
+ OpPPC64MOVBstoreidx
+ OpPPC64MOVHstoreidx
+ OpPPC64MOVWstoreidx
+ OpPPC64MOVDstoreidx
+ OpPPC64FMOVDstoreidx
+ OpPPC64FMOVSstoreidx
+ OpPPC64MOVHBRstoreidx
+ OpPPC64MOVWBRstoreidx
+ OpPPC64MOVDBRstoreidx
+ OpPPC64MOVBstorezero
+ OpPPC64MOVHstorezero
+ OpPPC64MOVWstorezero
+ OpPPC64MOVDstorezero
+ OpPPC64MOVDaddr
+ OpPPC64MOVDconst
+ OpPPC64FMOVDconst
+ OpPPC64FMOVSconst
+ OpPPC64FCMPU
+ OpPPC64CMP
+ OpPPC64CMPU
+ OpPPC64CMPW
+ OpPPC64CMPWU
+ OpPPC64CMPconst
+ OpPPC64CMPUconst
+ OpPPC64CMPWconst
+ OpPPC64CMPWUconst
+ OpPPC64ISEL
+ OpPPC64ISELB
+ OpPPC64Equal
+ OpPPC64NotEqual
+ OpPPC64LessThan
+ OpPPC64FLessThan
+ OpPPC64LessEqual
+ OpPPC64FLessEqual
+ OpPPC64GreaterThan
+ OpPPC64FGreaterThan
+ OpPPC64GreaterEqual
+ OpPPC64FGreaterEqual
+ OpPPC64LoweredGetClosurePtr
+ OpPPC64LoweredGetCallerSP
+ OpPPC64LoweredGetCallerPC
+ OpPPC64LoweredNilCheck
+ OpPPC64LoweredRound32F
+ OpPPC64LoweredRound64F
+ OpPPC64CALLstatic
+ OpPPC64CALLclosure
+ OpPPC64CALLinter
+ OpPPC64LoweredZero
+ OpPPC64LoweredZeroShort
+ OpPPC64LoweredQuadZeroShort
+ OpPPC64LoweredQuadZero
+ OpPPC64LoweredMove
+ OpPPC64LoweredMoveShort
+ OpPPC64LoweredQuadMove
+ OpPPC64LoweredQuadMoveShort
+ OpPPC64LoweredAtomicStore8
+ OpPPC64LoweredAtomicStore32
+ OpPPC64LoweredAtomicStore64
+ OpPPC64LoweredAtomicLoad8
+ OpPPC64LoweredAtomicLoad32
+ OpPPC64LoweredAtomicLoad64
+ OpPPC64LoweredAtomicLoadPtr
+ OpPPC64LoweredAtomicAdd32
+ OpPPC64LoweredAtomicAdd64
+ OpPPC64LoweredAtomicExchange32
+ OpPPC64LoweredAtomicExchange64
+ OpPPC64LoweredAtomicCas64
+ OpPPC64LoweredAtomicCas32
+ OpPPC64LoweredAtomicAnd8
+ OpPPC64LoweredAtomicAnd32
+ OpPPC64LoweredAtomicOr8
+ OpPPC64LoweredAtomicOr32
+ OpPPC64LoweredWB
+ OpPPC64LoweredPanicBoundsA
+ OpPPC64LoweredPanicBoundsB
+ OpPPC64LoweredPanicBoundsC
+ OpPPC64InvertFlags
+ OpPPC64FlagEQ
+ OpPPC64FlagLT
+ OpPPC64FlagGT
+
+ OpRISCV64ADD
+ OpRISCV64ADDI
+ OpRISCV64ADDIW
+ OpRISCV64NEG
+ OpRISCV64NEGW
+ OpRISCV64SUB
+ OpRISCV64SUBW
+ OpRISCV64MUL
+ OpRISCV64MULW
+ OpRISCV64MULH
+ OpRISCV64MULHU
+ OpRISCV64DIV
+ OpRISCV64DIVU
+ OpRISCV64DIVW
+ OpRISCV64DIVUW
+ OpRISCV64REM
+ OpRISCV64REMU
+ OpRISCV64REMW
+ OpRISCV64REMUW
+ OpRISCV64MOVaddr
+ OpRISCV64MOVBconst
+ OpRISCV64MOVHconst
+ OpRISCV64MOVWconst
+ OpRISCV64MOVDconst
+ OpRISCV64MOVBload
+ OpRISCV64MOVHload
+ OpRISCV64MOVWload
+ OpRISCV64MOVDload
+ OpRISCV64MOVBUload
+ OpRISCV64MOVHUload
+ OpRISCV64MOVWUload
+ OpRISCV64MOVBstore
+ OpRISCV64MOVHstore
+ OpRISCV64MOVWstore
+ OpRISCV64MOVDstore
+ OpRISCV64MOVBstorezero
+ OpRISCV64MOVHstorezero
+ OpRISCV64MOVWstorezero
+ OpRISCV64MOVDstorezero
+ OpRISCV64MOVBreg
+ OpRISCV64MOVHreg
+ OpRISCV64MOVWreg
+ OpRISCV64MOVDreg
+ OpRISCV64MOVBUreg
+ OpRISCV64MOVHUreg
+ OpRISCV64MOVWUreg
+ OpRISCV64MOVDnop
+ OpRISCV64SLL
+ OpRISCV64SRA
+ OpRISCV64SRL
+ OpRISCV64SLLI
+ OpRISCV64SRAI
+ OpRISCV64SRLI
+ OpRISCV64XOR
+ OpRISCV64XORI
+ OpRISCV64OR
+ OpRISCV64ORI
+ OpRISCV64AND
+ OpRISCV64ANDI
+ OpRISCV64NOT
+ OpRISCV64SEQZ
+ OpRISCV64SNEZ
+ OpRISCV64SLT
+ OpRISCV64SLTI
+ OpRISCV64SLTU
+ OpRISCV64SLTIU
+ OpRISCV64MOVconvert
+ OpRISCV64CALLstatic
+ OpRISCV64CALLclosure
+ OpRISCV64CALLinter
+ OpRISCV64DUFFZERO
+ OpRISCV64DUFFCOPY
+ OpRISCV64LoweredZero
+ OpRISCV64LoweredMove
+ OpRISCV64LoweredAtomicLoad8
+ OpRISCV64LoweredAtomicLoad32
+ OpRISCV64LoweredAtomicLoad64
+ OpRISCV64LoweredAtomicStore8
+ OpRISCV64LoweredAtomicStore32
+ OpRISCV64LoweredAtomicStore64
+ OpRISCV64LoweredAtomicExchange32
+ OpRISCV64LoweredAtomicExchange64
+ OpRISCV64LoweredAtomicAdd32
+ OpRISCV64LoweredAtomicAdd64
+ OpRISCV64LoweredAtomicCas32
+ OpRISCV64LoweredAtomicCas64
+ OpRISCV64LoweredNilCheck
+ OpRISCV64LoweredGetClosurePtr
+ OpRISCV64LoweredGetCallerSP
+ OpRISCV64LoweredGetCallerPC
+ OpRISCV64LoweredWB
+ OpRISCV64LoweredPanicBoundsA
+ OpRISCV64LoweredPanicBoundsB
+ OpRISCV64LoweredPanicBoundsC
+ OpRISCV64FADDS
+ OpRISCV64FSUBS
+ OpRISCV64FMULS
+ OpRISCV64FDIVS
+ OpRISCV64FSQRTS
+ OpRISCV64FNEGS
+ OpRISCV64FMVSX
+ OpRISCV64FCVTSW
+ OpRISCV64FCVTSL
+ OpRISCV64FCVTWS
+ OpRISCV64FCVTLS
+ OpRISCV64FMOVWload
+ OpRISCV64FMOVWstore
+ OpRISCV64FEQS
+ OpRISCV64FNES
+ OpRISCV64FLTS
+ OpRISCV64FLES
+ OpRISCV64FADDD
+ OpRISCV64FSUBD
+ OpRISCV64FMULD
+ OpRISCV64FDIVD
+ OpRISCV64FSQRTD
+ OpRISCV64FNEGD
+ OpRISCV64FMVDX
+ OpRISCV64FCVTDW
+ OpRISCV64FCVTDL
+ OpRISCV64FCVTWD
+ OpRISCV64FCVTLD
+ OpRISCV64FCVTDS
+ OpRISCV64FCVTSD
+ OpRISCV64FMOVDload
+ OpRISCV64FMOVDstore
+ OpRISCV64FEQD
+ OpRISCV64FNED
+ OpRISCV64FLTD
+ OpRISCV64FLED
+
+ OpS390XFADDS
+ OpS390XFADD
+ OpS390XFSUBS
+ OpS390XFSUB
+ OpS390XFMULS
+ OpS390XFMUL
+ OpS390XFDIVS
+ OpS390XFDIV
+ OpS390XFNEGS
+ OpS390XFNEG
+ OpS390XFMADDS
+ OpS390XFMADD
+ OpS390XFMSUBS
+ OpS390XFMSUB
+ OpS390XLPDFR
+ OpS390XLNDFR
+ OpS390XCPSDR
+ OpS390XFIDBR
+ OpS390XFMOVSload
+ OpS390XFMOVDload
+ OpS390XFMOVSconst
+ OpS390XFMOVDconst
+ OpS390XFMOVSloadidx
+ OpS390XFMOVDloadidx
+ OpS390XFMOVSstore
+ OpS390XFMOVDstore
+ OpS390XFMOVSstoreidx
+ OpS390XFMOVDstoreidx
+ OpS390XADD
+ OpS390XADDW
+ OpS390XADDconst
+ OpS390XADDWconst
+ OpS390XADDload
+ OpS390XADDWload
+ OpS390XSUB
+ OpS390XSUBW
+ OpS390XSUBconst
+ OpS390XSUBWconst
+ OpS390XSUBload
+ OpS390XSUBWload
+ OpS390XMULLD
+ OpS390XMULLW
+ OpS390XMULLDconst
+ OpS390XMULLWconst
+ OpS390XMULLDload
+ OpS390XMULLWload
+ OpS390XMULHD
+ OpS390XMULHDU
+ OpS390XDIVD
+ OpS390XDIVW
+ OpS390XDIVDU
+ OpS390XDIVWU
+ OpS390XMODD
+ OpS390XMODW
+ OpS390XMODDU
+ OpS390XMODWU
+ OpS390XAND
+ OpS390XANDW
+ OpS390XANDconst
+ OpS390XANDWconst
+ OpS390XANDload
+ OpS390XANDWload
+ OpS390XOR
+ OpS390XORW
+ OpS390XORconst
+ OpS390XORWconst
+ OpS390XORload
+ OpS390XORWload
+ OpS390XXOR
+ OpS390XXORW
+ OpS390XXORconst
+ OpS390XXORWconst
+ OpS390XXORload
+ OpS390XXORWload
+ OpS390XADDC
+ OpS390XADDCconst
+ OpS390XADDE
+ OpS390XSUBC
+ OpS390XSUBE
+ OpS390XCMP
+ OpS390XCMPW
+ OpS390XCMPU
+ OpS390XCMPWU
+ OpS390XCMPconst
+ OpS390XCMPWconst
+ OpS390XCMPUconst
+ OpS390XCMPWUconst
+ OpS390XFCMPS
+ OpS390XFCMP
+ OpS390XLTDBR
+ OpS390XLTEBR
+ OpS390XSLD
+ OpS390XSLW
+ OpS390XSLDconst
+ OpS390XSLWconst
+ OpS390XSRD
+ OpS390XSRW
+ OpS390XSRDconst
+ OpS390XSRWconst
+ OpS390XSRAD
+ OpS390XSRAW
+ OpS390XSRADconst
+ OpS390XSRAWconst
+ OpS390XRLLG
+ OpS390XRLL
+ OpS390XRLLconst
+ OpS390XRXSBG
+ OpS390XRISBGZ
+ OpS390XNEG
+ OpS390XNEGW
+ OpS390XNOT
+ OpS390XNOTW
+ OpS390XFSQRT
+ OpS390XLOCGR
+ OpS390XMOVBreg
+ OpS390XMOVBZreg
+ OpS390XMOVHreg
+ OpS390XMOVHZreg
+ OpS390XMOVWreg
+ OpS390XMOVWZreg
+ OpS390XMOVDconst
+ OpS390XLDGR
+ OpS390XLGDR
+ OpS390XCFDBRA
+ OpS390XCGDBRA
+ OpS390XCFEBRA
+ OpS390XCGEBRA
+ OpS390XCEFBRA
+ OpS390XCDFBRA
+ OpS390XCEGBRA
+ OpS390XCDGBRA
+ OpS390XCLFEBR
+ OpS390XCLFDBR
+ OpS390XCLGEBR
+ OpS390XCLGDBR
+ OpS390XCELFBR
+ OpS390XCDLFBR
+ OpS390XCELGBR
+ OpS390XCDLGBR
+ OpS390XLEDBR
+ OpS390XLDEBR
+ OpS390XMOVDaddr
+ OpS390XMOVDaddridx
+ OpS390XMOVBZload
+ OpS390XMOVBload
+ OpS390XMOVHZload
+ OpS390XMOVHload
+ OpS390XMOVWZload
+ OpS390XMOVWload
+ OpS390XMOVDload
+ OpS390XMOVWBR
+ OpS390XMOVDBR
+ OpS390XMOVHBRload
+ OpS390XMOVWBRload
+ OpS390XMOVDBRload
+ OpS390XMOVBstore
+ OpS390XMOVHstore
+ OpS390XMOVWstore
+ OpS390XMOVDstore
+ OpS390XMOVHBRstore
+ OpS390XMOVWBRstore
+ OpS390XMOVDBRstore
+ OpS390XMVC
+ OpS390XMOVBZloadidx
+ OpS390XMOVBloadidx
+ OpS390XMOVHZloadidx
+ OpS390XMOVHloadidx
+ OpS390XMOVWZloadidx
+ OpS390XMOVWloadidx
+ OpS390XMOVDloadidx
+ OpS390XMOVHBRloadidx
+ OpS390XMOVWBRloadidx
+ OpS390XMOVDBRloadidx
+ OpS390XMOVBstoreidx
+ OpS390XMOVHstoreidx
+ OpS390XMOVWstoreidx
+ OpS390XMOVDstoreidx
+ OpS390XMOVHBRstoreidx
+ OpS390XMOVWBRstoreidx
+ OpS390XMOVDBRstoreidx
+ OpS390XMOVBstoreconst
+ OpS390XMOVHstoreconst
+ OpS390XMOVWstoreconst
+ OpS390XMOVDstoreconst
+ OpS390XCLEAR
+ OpS390XCALLstatic
+ OpS390XCALLclosure
+ OpS390XCALLinter
+ OpS390XInvertFlags
+ OpS390XLoweredGetG
+ OpS390XLoweredGetClosurePtr
+ OpS390XLoweredGetCallerSP
+ OpS390XLoweredGetCallerPC
+ OpS390XLoweredNilCheck
+ OpS390XLoweredRound32F
+ OpS390XLoweredRound64F
+ OpS390XLoweredWB
+ OpS390XLoweredPanicBoundsA
+ OpS390XLoweredPanicBoundsB
+ OpS390XLoweredPanicBoundsC
+ OpS390XFlagEQ
+ OpS390XFlagLT
+ OpS390XFlagGT
+ OpS390XFlagOV
+ OpS390XSYNC
+ OpS390XMOVBZatomicload
+ OpS390XMOVWZatomicload
+ OpS390XMOVDatomicload
+ OpS390XMOVBatomicstore
+ OpS390XMOVWatomicstore
+ OpS390XMOVDatomicstore
+ OpS390XLAA
+ OpS390XLAAG
+ OpS390XAddTupleFirst32
+ OpS390XAddTupleFirst64
+ OpS390XLAN
+ OpS390XLANfloor
+ OpS390XLAO
+ OpS390XLAOfloor
+ OpS390XLoweredAtomicCas32
+ OpS390XLoweredAtomicCas64
+ OpS390XLoweredAtomicExchange32
+ OpS390XLoweredAtomicExchange64
+ OpS390XFLOGR
+ OpS390XPOPCNT
+ OpS390XMLGR
+ OpS390XSumBytes2
+ OpS390XSumBytes4
+ OpS390XSumBytes8
+ OpS390XSTMG2
+ OpS390XSTMG3
+ OpS390XSTMG4
+ OpS390XSTM2
+ OpS390XSTM3
+ OpS390XSTM4
+ OpS390XLoweredMove
+ OpS390XLoweredZero
+
+ OpWasmLoweredStaticCall
+ OpWasmLoweredClosureCall
+ OpWasmLoweredInterCall
+ OpWasmLoweredAddr
+ OpWasmLoweredMove
+ OpWasmLoweredZero
+ OpWasmLoweredGetClosurePtr
+ OpWasmLoweredGetCallerPC
+ OpWasmLoweredGetCallerSP
+ OpWasmLoweredNilCheck
+ OpWasmLoweredWB
+ OpWasmLoweredConvert
+ OpWasmSelect
+ OpWasmI64Load8U
+ OpWasmI64Load8S
+ OpWasmI64Load16U
+ OpWasmI64Load16S
+ OpWasmI64Load32U
+ OpWasmI64Load32S
+ OpWasmI64Load
+ OpWasmI64Store8
+ OpWasmI64Store16
+ OpWasmI64Store32
+ OpWasmI64Store
+ OpWasmF32Load
+ OpWasmF64Load
+ OpWasmF32Store
+ OpWasmF64Store
+ OpWasmI64Const
+ OpWasmF32Const
+ OpWasmF64Const
+ OpWasmI64Eqz
+ OpWasmI64Eq
+ OpWasmI64Ne
+ OpWasmI64LtS
+ OpWasmI64LtU
+ OpWasmI64GtS
+ OpWasmI64GtU
+ OpWasmI64LeS
+ OpWasmI64LeU
+ OpWasmI64GeS
+ OpWasmI64GeU
+ OpWasmF32Eq
+ OpWasmF32Ne
+ OpWasmF32Lt
+ OpWasmF32Gt
+ OpWasmF32Le
+ OpWasmF32Ge
+ OpWasmF64Eq
+ OpWasmF64Ne
+ OpWasmF64Lt
+ OpWasmF64Gt
+ OpWasmF64Le
+ OpWasmF64Ge
+ OpWasmI64Add
+ OpWasmI64AddConst
+ OpWasmI64Sub
+ OpWasmI64Mul
+ OpWasmI64DivS
+ OpWasmI64DivU
+ OpWasmI64RemS
+ OpWasmI64RemU
+ OpWasmI64And
+ OpWasmI64Or
+ OpWasmI64Xor
+ OpWasmI64Shl
+ OpWasmI64ShrS
+ OpWasmI64ShrU
+ OpWasmF32Neg
+ OpWasmF32Add
+ OpWasmF32Sub
+ OpWasmF32Mul
+ OpWasmF32Div
+ OpWasmF64Neg
+ OpWasmF64Add
+ OpWasmF64Sub
+ OpWasmF64Mul
+ OpWasmF64Div
+ OpWasmI64TruncSatF64S
+ OpWasmI64TruncSatF64U
+ OpWasmI64TruncSatF32S
+ OpWasmI64TruncSatF32U
+ OpWasmF32ConvertI64S
+ OpWasmF32ConvertI64U
+ OpWasmF64ConvertI64S
+ OpWasmF64ConvertI64U
+ OpWasmF32DemoteF64
+ OpWasmF64PromoteF32
+ OpWasmI64Extend8S
+ OpWasmI64Extend16S
+ OpWasmI64Extend32S
+ OpWasmF32Sqrt
+ OpWasmF32Trunc
+ OpWasmF32Ceil
+ OpWasmF32Floor
+ OpWasmF32Nearest
+ OpWasmF32Abs
+ OpWasmF32Copysign
+ OpWasmF64Sqrt
+ OpWasmF64Trunc
+ OpWasmF64Ceil
+ OpWasmF64Floor
+ OpWasmF64Nearest
+ OpWasmF64Abs
+ OpWasmF64Copysign
+ OpWasmI64Ctz
+ OpWasmI64Clz
+ OpWasmI32Rotl
+ OpWasmI64Rotl
+ OpWasmI64Popcnt
+
+ OpAdd8
+ OpAdd16
+ OpAdd32
+ OpAdd64
+ OpAddPtr
+ OpAdd32F
+ OpAdd64F
+ OpSub8
+ OpSub16
+ OpSub32
+ OpSub64
+ OpSubPtr
+ OpSub32F
+ OpSub64F
+ OpMul8
+ OpMul16
+ OpMul32
+ OpMul64
+ OpMul32F
+ OpMul64F
+ OpDiv32F
+ OpDiv64F
+ OpHmul32
+ OpHmul32u
+ OpHmul64
+ OpHmul64u
+ OpMul32uhilo
+ OpMul64uhilo
+ OpMul32uover
+ OpMul64uover
+ OpAvg32u
+ OpAvg64u
+ OpDiv8
+ OpDiv8u
+ OpDiv16
+ OpDiv16u
+ OpDiv32
+ OpDiv32u
+ OpDiv64
+ OpDiv64u
+ OpDiv128u
+ OpMod8
+ OpMod8u
+ OpMod16
+ OpMod16u
+ OpMod32
+ OpMod32u
+ OpMod64
+ OpMod64u
+ OpAnd8
+ OpAnd16
+ OpAnd32
+ OpAnd64
+ OpOr8
+ OpOr16
+ OpOr32
+ OpOr64
+ OpXor8
+ OpXor16
+ OpXor32
+ OpXor64
+ OpLsh8x8
+ OpLsh8x16
+ OpLsh8x32
+ OpLsh8x64
+ OpLsh16x8
+ OpLsh16x16
+ OpLsh16x32
+ OpLsh16x64
+ OpLsh32x8
+ OpLsh32x16
+ OpLsh32x32
+ OpLsh32x64
+ OpLsh64x8
+ OpLsh64x16
+ OpLsh64x32
+ OpLsh64x64
+ OpRsh8x8
+ OpRsh8x16
+ OpRsh8x32
+ OpRsh8x64
+ OpRsh16x8
+ OpRsh16x16
+ OpRsh16x32
+ OpRsh16x64
+ OpRsh32x8
+ OpRsh32x16
+ OpRsh32x32
+ OpRsh32x64
+ OpRsh64x8
+ OpRsh64x16
+ OpRsh64x32
+ OpRsh64x64
+ OpRsh8Ux8
+ OpRsh8Ux16
+ OpRsh8Ux32
+ OpRsh8Ux64
+ OpRsh16Ux8
+ OpRsh16Ux16
+ OpRsh16Ux32
+ OpRsh16Ux64
+ OpRsh32Ux8
+ OpRsh32Ux16
+ OpRsh32Ux32
+ OpRsh32Ux64
+ OpRsh64Ux8
+ OpRsh64Ux16
+ OpRsh64Ux32
+ OpRsh64Ux64
+ OpEq8
+ OpEq16
+ OpEq32
+ OpEq64
+ OpEqPtr
+ OpEqInter
+ OpEqSlice
+ OpEq32F
+ OpEq64F
+ OpNeq8
+ OpNeq16
+ OpNeq32
+ OpNeq64
+ OpNeqPtr
+ OpNeqInter
+ OpNeqSlice
+ OpNeq32F
+ OpNeq64F
+ OpLess8
+ OpLess8U
+ OpLess16
+ OpLess16U
+ OpLess32
+ OpLess32U
+ OpLess64
+ OpLess64U
+ OpLess32F
+ OpLess64F
+ OpLeq8
+ OpLeq8U
+ OpLeq16
+ OpLeq16U
+ OpLeq32
+ OpLeq32U
+ OpLeq64
+ OpLeq64U
+ OpLeq32F
+ OpLeq64F
+ OpCondSelect
+ OpAndB
+ OpOrB
+ OpEqB
+ OpNeqB
+ OpNot
+ OpNeg8
+ OpNeg16
+ OpNeg32
+ OpNeg64
+ OpNeg32F
+ OpNeg64F
+ OpCom8
+ OpCom16
+ OpCom32
+ OpCom64
+ OpCtz8
+ OpCtz16
+ OpCtz32
+ OpCtz64
+ OpCtz8NonZero
+ OpCtz16NonZero
+ OpCtz32NonZero
+ OpCtz64NonZero
+ OpBitLen8
+ OpBitLen16
+ OpBitLen32
+ OpBitLen64
+ OpBswap32
+ OpBswap64
+ OpBitRev8
+ OpBitRev16
+ OpBitRev32
+ OpBitRev64
+ OpPopCount8
+ OpPopCount16
+ OpPopCount32
+ OpPopCount64
+ OpRotateLeft8
+ OpRotateLeft16
+ OpRotateLeft32
+ OpRotateLeft64
+ OpSqrt
+ OpFloor
+ OpCeil
+ OpTrunc
+ OpRound
+ OpRoundToEven
+ OpAbs
+ OpCopysign
+ OpFMA
+ OpPhi
+ OpCopy
+ OpConvert
+ OpConstBool
+ OpConstString
+ OpConstNil
+ OpConst8
+ OpConst16
+ OpConst32
+ OpConst64
+ OpConst32F
+ OpConst64F
+ OpConstInterface
+ OpConstSlice
+ OpInitMem
+ OpArg
+ OpAddr
+ OpLocalAddr
+ OpSP
+ OpSB
+ OpLoad
+ OpDereference
+ OpStore
+ OpMove
+ OpZero
+ OpStoreWB
+ OpMoveWB
+ OpZeroWB
+ OpWB
+ OpHasCPUFeature
+ OpPanicBounds
+ OpPanicExtend
+ OpClosureCall
+ OpStaticCall
+ OpInterCall
+ OpClosureLECall
+ OpStaticLECall
+ OpInterLECall
+ OpSignExt8to16
+ OpSignExt8to32
+ OpSignExt8to64
+ OpSignExt16to32
+ OpSignExt16to64
+ OpSignExt32to64
+ OpZeroExt8to16
+ OpZeroExt8to32
+ OpZeroExt8to64
+ OpZeroExt16to32
+ OpZeroExt16to64
+ OpZeroExt32to64
+ OpTrunc16to8
+ OpTrunc32to8
+ OpTrunc32to16
+ OpTrunc64to8
+ OpTrunc64to16
+ OpTrunc64to32
+ OpCvt32to32F
+ OpCvt32to64F
+ OpCvt64to32F
+ OpCvt64to64F
+ OpCvt32Fto32
+ OpCvt32Fto64
+ OpCvt64Fto32
+ OpCvt64Fto64
+ OpCvt32Fto64F
+ OpCvt64Fto32F
+ OpCvtBoolToUint8
+ OpRound32F
+ OpRound64F
+ OpIsNonNil
+ OpIsInBounds
+ OpIsSliceInBounds
+ OpNilCheck
+ OpGetG
+ OpGetClosurePtr
+ OpGetCallerPC
+ OpGetCallerSP
+ OpPtrIndex
+ OpOffPtr
+ OpSliceMake
+ OpSlicePtr
+ OpSliceLen
+ OpSliceCap
+ OpComplexMake
+ OpComplexReal
+ OpComplexImag
+ OpStringMake
+ OpStringPtr
+ OpStringLen
+ OpIMake
+ OpITab
+ OpIData
+ OpStructMake0
+ OpStructMake1
+ OpStructMake2
+ OpStructMake3
+ OpStructMake4
+ OpStructSelect
+ OpArrayMake0
+ OpArrayMake1
+ OpArraySelect
+ OpStoreReg
+ OpLoadReg
+ OpFwdRef
+ OpUnknown
+ OpVarDef
+ OpVarKill
+ OpVarLive
+ OpKeepAlive
+ OpInlMark
+ OpInt64Make
+ OpInt64Hi
+ OpInt64Lo
+ OpAdd32carry
+ OpAdd32withcarry
+ OpSub32carry
+ OpSub32withcarry
+ OpAdd64carry
+ OpSub64borrow
+ OpSignmask
+ OpZeromask
+ OpSlicemask
+ OpSpectreIndex
+ OpSpectreSliceIndex
+ OpCvt32Uto32F
+ OpCvt32Uto64F
+ OpCvt32Fto32U
+ OpCvt64Fto32U
+ OpCvt64Uto32F
+ OpCvt64Uto64F
+ OpCvt32Fto64U
+ OpCvt64Fto64U
+ OpSelect0
+ OpSelect1
+ OpSelectN
+ OpSelectNAddr
+ OpMakeResult
+ OpAtomicLoad8
+ OpAtomicLoad32
+ OpAtomicLoad64
+ OpAtomicLoadPtr
+ OpAtomicLoadAcq32
+ OpAtomicLoadAcq64
+ OpAtomicStore8
+ OpAtomicStore32
+ OpAtomicStore64
+ OpAtomicStorePtrNoWB
+ OpAtomicStoreRel32
+ OpAtomicStoreRel64
+ OpAtomicExchange32
+ OpAtomicExchange64
+ OpAtomicAdd32
+ OpAtomicAdd64
+ OpAtomicCompareAndSwap32
+ OpAtomicCompareAndSwap64
+ OpAtomicCompareAndSwapRel32
+ OpAtomicAnd8
+ OpAtomicAnd32
+ OpAtomicOr8
+ OpAtomicOr32
+ OpAtomicAdd32Variant
+ OpAtomicAdd64Variant
+ OpAtomicExchange32Variant
+ OpAtomicExchange64Variant
+ OpAtomicCompareAndSwap32Variant
+ OpAtomicCompareAndSwap64Variant
+ OpAtomicAnd8Variant
+ OpAtomicAnd32Variant
+ OpAtomicOr8Variant
+ OpAtomicOr32Variant
+ OpClobber
+)
+
+var opcodeTable = [...]opInfo{
+ {name: "OpInvalid"},
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ usesScratch: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ usesScratch: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ usesScratch: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ usesScratch: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 239}, // AX CX DX BX BP SI DI
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCL",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLcarry",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBL",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MULLQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "AVGLU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MODL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ usesScratch: true,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ usesScratch: true,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSX",
+ argLen: 1,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSX",
+ argLen: 1,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ usesScratch: true,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ usesScratch: true,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ usesScratch: true,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ usesScratch: true,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ usesScratch: true,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "REPSTOSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "REPMOVSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 65280, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 4}, // DX
+ {2, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 2}, // CX
+ {2, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 1}, // AX
+ {2, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVSSconst1",
+ auxType: auxFloat32,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSDconst1",
+ auxType: auxFloat64,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSSconst2",
+ argLen: 1,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst2",
+ argLen: 1,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ {2, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ADDQ",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3Q,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MULQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "AVGQU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "DIVQ",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
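+ // For the flags-producing ops below (NEGLflags, the ADD/ADC carry chain and
+ // the SUB/SBB borrow chain), the output entry with a zero mask is the flags
+ // result; it occupies no general-purpose register.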
+ {
+ name: "NEGLflags",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADCQ",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADCQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQborrow",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SBBQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconstborrow",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SBBQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MULQU2",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVQU2",
+ argLen: 3,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQ",
+ argLen: 2,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
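+ // The 4294901760 (0xffff0000) masks on the floating-point compares select
+ // the X0-X15 SSE registers, per the comments alongside each mask.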
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "BTL",
+ argLen: 2,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTQ",
+ argLen: 2,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTCL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTCQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTSL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTSQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTCLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTCQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTRLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTSLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTSQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BTCQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "TESTQ",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
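+ // Variable shift and rotate counts are pinned to CX ({1, 2} below), matching
+ // the x86 requirement that a non-immediate shift count live in CL.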
+ {
+ name: "SHLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "RORQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "RORL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "RORB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
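+ // The *load, *loadidx<scale>, and *modify variants that follow fold a memory
+ // operand into the ALU op: symEffect records the read/write effect, scale is
+ // the index multiplier, and faultOnNilArg marks which argument is the
+ // dereferenced pointer.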
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ADDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "NEGQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "NOTQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANOTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSFQ",
+ argLen: 1,
+ asm: x86.ABSFQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSRQ",
+ argLen: 1,
+ asm: x86.ABSRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "ROUNDSD",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.AROUNDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "VFMADD231SD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "SBBQcarrymask",
+ argLen: 1,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETEQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETNEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSX",
+ argLen: 1,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSX",
+ argLen: 1,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSX",
+ argLen: 1,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQZX",
+ argLen: 1,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVQconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSD2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSS2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SS",
+ argLen: 1,
+ asm: x86.ACVTSQ2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SD",
+ argLen: 1,
+ asm: x86.ACVTSQ2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVQi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVQf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVLf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "LEAQ",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAW",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAW1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAW2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAW4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LEAW8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVQload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVOload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "MOVOstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 65536}, // X0
+ },
+ clobbers: 128, // DI
+ },
+ },
+ {
+ name: "MOVOconst",
+ auxType: auxInt128,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ },
+ {
+ name: "REPSTOSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 65728, // SI DI X0
+ },
+ },
+ {
+ name: "REPMOVSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 879}, // AX CX DX BX BP SI R8 R9
+ },
+ clobbers: 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+ },
+ },
+ {
+ name: "LoweredHasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ rematerializeable: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVLatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "MOVQatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XCHGB",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XCHGL",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XCHGQ",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XADDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "XADDQlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "CMPXCHGLlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "CMPXCHGQlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "ANDBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+
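+	// ARM (32-bit) opcode descriptors follow; the AMD64 entries end above.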
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSB",
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMULU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLudiv",
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 16396, // R2 R3 R14
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "ADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADC",
+ argLen: 3,
+ commutative: true,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBS",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBC",
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULA",
+ argLen: 3,
+ asm: arm.AMULA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULS",
+ argLen: 3,
+ asm: arm.AMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: arm.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: arm.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: arm.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: arm.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AFMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFX",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFXU",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFXU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: arm.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: arm.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: arm.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ABSD",
+ argLen: 1,
+ asm: arm.AABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV16",
+ argLen: 1,
+ asm: arm.AREV16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRR",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRRconst",
+ auxType: auxInt32,
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRR",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRAreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRAreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRAreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRAreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRAreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRAreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRAreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRAreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQ",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF",
+ argLen: 2,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD",
+ argLen: 2,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRAreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRAreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF0",
+ argLen: 1,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD0",
+ argLen: 1,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294975488}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftLL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRA",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm.AMOVBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm.AMOVHS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVFW",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDW",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFWU",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDWU",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: arm.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: arm.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMOVWHSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMOVWLSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAcond",
+ argLen: 3,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 128}, // R7
+ {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 16386, // R1 R14
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 16391, // R0 R1 R2 R14
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 128}, // R7
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 1}, // R0
+ {2, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 4294918144, // R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+
+ {
+ name: "ADCSflags",
+ argLen: 3,
+ commutative: true,
+ asm: arm64.AADCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADCzerocarry",
+ argLen: 1,
+ asm: arm64.AADC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1878786047}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSconstflags",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSflags",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBCSflags",
+ argLen: 3,
+ asm: arm64.ASBCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBSflags",
+ argLen: 2,
+ asm: arm64.ASUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEG",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEGW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: arm64.ASDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIV",
+ argLen: 2,
+ asm: arm64.AUDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: arm64.ASDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIVW",
+ argLen: 2,
+ asm: arm64.AUDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ asm: arm64.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMOD",
+ argLen: 2,
+ asm: arm64.AUREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ asm: arm64.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMODW",
+ argLen: 2,
+ asm: arm64.AUREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: arm64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: arm64.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: arm64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: arm64.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EON",
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGSflags",
+ argLen: 1,
+ asm: arm64.ANEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NGCzerocarry",
+ argLen: 1,
+ asm: arm64.ANGC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FABSD",
+ argLen: 1,
+ asm: arm64.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: arm64.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: arm64.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: arm64.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm64.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REVW",
+ argLen: 1,
+ asm: arm64.AREVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REV16W",
+ argLen: 1,
+ asm: arm64.AREV16W,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm64.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBITW",
+ argLen: 1,
+ asm: arm64.ARBITW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm64.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZW",
+ argLen: 1,
+ asm: arm64.ACLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "VCNT",
+ argLen: 1,
+ asm: arm64.AVCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "VUADDLV",
+ argLen: 1,
+ asm: arm64.AVUADDLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: arm64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDD",
+ argLen: 3,
+ asm: arm64.AFMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDS",
+ argLen: 3,
+ asm: arm64.AFNMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDD",
+ argLen: 3,
+ asm: arm64.AFNMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: arm64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBD",
+ argLen: 3,
+ asm: arm64.AFMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBS",
+ argLen: 3,
+ asm: arm64.AFNMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBD",
+ argLen: 3,
+ asm: arm64.AFNMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MADD",
+ argLen: 3,
+ asm: arm64.AMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MADDW",
+ argLen: 3,
+ asm: arm64.AMADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUB",
+ argLen: 3,
+ asm: arm64.AMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUBW",
+ argLen: 3,
+ asm: arm64.AMSUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ROR",
+ argLen: 2,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRWconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD",
+ argLen: 2,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPS0",
+ argLen: 1,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD0",
+ argLen: 1,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "BFI",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BFXIL",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFXIL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517632}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STP",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx2",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx4",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx8",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVQstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVDgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVSfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SCVTFWS",
+ argLen: 1,
+ asm: arm64.ASCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFWD",
+ argLen: 1,
+ asm: arm64.ASCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWS",
+ argLen: 1,
+ asm: arm64.AUCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWD",
+ argLen: 1,
+ asm: arm64.AUCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFS",
+ argLen: 1,
+ asm: arm64.ASCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFD",
+ argLen: 1,
+ asm: arm64.ASCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFS",
+ argLen: 1,
+ asm: arm64.AUCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFD",
+ argLen: 1,
+ asm: arm64.AUCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTZSSW",
+ argLen: 1,
+ asm: arm64.AFCVTZSSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSDW",
+ argLen: 1,
+ asm: arm64.AFCVTZSDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUSW",
+ argLen: 1,
+ asm: arm64.AFCVTZUSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUDW",
+ argLen: 1,
+ asm: arm64.AFCVTZUDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSS",
+ argLen: 1,
+ asm: arm64.AFCVTZSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSD",
+ argLen: 1,
+ asm: arm64.AFCVTZSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUS",
+ argLen: 1,
+ asm: arm64.AFCVTZUS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUD",
+ argLen: 1,
+ asm: arm64.AFCVTZUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: arm64.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: arm64.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTAD",
+ argLen: 1,
+ asm: arm64.AFRINTAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTMD",
+ argLen: 1,
+ asm: arm64.AFRINTMD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTND",
+ argLen: 1,
+ asm: arm64.AFRINTND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTPD",
+ argLen: 1,
+ asm: arm64.AFRINTPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTZD",
+ argLen: 1,
+ asm: arm64.AFRINTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CSEL",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSEL0",
+ auxType: auxCCop,
+ argLen: 2,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 67108864}, // R26
+ {0, 1744568319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 538116096, // R16 R17 R20 R30
+ },
+ },
+ {
+ name: "LoweredZero",
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65536}, // R16
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 65536, // R16
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2097152}, // R21
+ {1, 1048576}, // R20
+ },
+ clobbers: 607322112, // R16 R17 R20 R21 R26 R30
+ },
+ },
+ {
+ name: "LoweredMove",
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 131072}, // R17
+ {1, 65536}, // R16
+ {2, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 196608, // R16 R17
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 67108864}, // R26
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LDAR",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARB",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARW",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "STLRB",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLR",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLRW",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 9223372035244359680, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+
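(Each inputInfo/outputInfo entry above is a pair {argument index, register bitmask}; the trailing comment enumerates the registers whose bits are set in that mask, with bit positions following the architecture's own register ordering in these generated tables. A minimal, self-contained sketch of that decoding, using a hypothetical regNames slice purely for illustration — not part of the generated file:)

    package main

    import "fmt"

    // decodeMask lists the register names whose bits are set in mask.
    // Bit i corresponds to regNames[i], mirroring how each mask value
    // in the tables above pairs with its register-list comment.
    func decodeMask(mask uint64, regNames []string) []string {
        var regs []string
        for i, name := range regNames {
            if mask&(uint64(1)<<uint(i)) != 0 {
                regs = append(regs, name)
            }
        }
        return regs
    }

    func main() {
        // Hypothetical four-register ordering, just to show the mechanics.
        names := []string{"R0", "R1", "R2", "R3"}
        fmt.Println(decodeMask(0b1010, names)) // prints [R1 R3]
    }
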
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ clobbers: 105553116266496, // HI LO
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MULT",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "MULTU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: mips.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: mips.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: mips.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTzero",
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUzero",
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140737555464192}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZzero",
+ argLen: 2,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt32,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt32,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 8}, // R3
+ {2, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADDV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ADDVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBV",
+ argLen: 2,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MULV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "MULVU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVV",
+ argLen: 2,
+ asm: mips.ADIVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVVU",
+ argLen: 2,
+ asm: mips.ADIVVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGV",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SLLV",
+ argLen: 2,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SLLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLV",
+ argLen: 2,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAV",
+ argLen: 2,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018460942336}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVreg",
+ argLen: 1,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVF",
+ argLen: 1,
+ asm: mips.AMOVVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVD",
+ argLen: 1,
+ asm: mips.AMOVVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFV",
+ argLen: 1,
+ asm: mips.ATRUNCFV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDV",
+ argLen: 1,
+ asm: mips.ATRUNCDV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 134217730, // R1 R31
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 134217734, // R1 R2 R31
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst32",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst64",
+ auxType: auxInt64,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: ppc64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBFCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ asm: ppc64.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: ppc64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MADDLD",
+ argLen: 3,
+ asm: ppc64.AMADDLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHWU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ asm: ppc64.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: ppc64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ asm: ppc64.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: ppc64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTL",
+ argLen: 2,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLW",
+ argLen: 2,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLDICL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ARLDICL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLWI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLWI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLDI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAdd64Carry",
+ argLen: 3,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EXTSWSLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AEXTSWSLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWINM",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWNM",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWMI",
+ auxType: auxInt64,
+ argLen: 2,
+ resultInArg0: true,
+ asm: ppc64.ARLWMI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZD",
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.ACNTLZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.ACNTLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZD",
+ argLen: 1,
+ asm: ppc64.ACNTTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZW",
+ argLen: 1,
+ asm: ppc64.ACNTTZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTD",
+ argLen: 1,
+ asm: ppc64.APOPCNTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTW",
+ argLen: 1,
+ asm: ppc64.APOPCNTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTB",
+ argLen: 1,
+ asm: ppc64.APOPCNTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ asm: ppc64.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: ppc64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: ppc64.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: ppc64.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ asm: ppc64.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ asm: ppc64.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUD",
+ argLen: 2,
+ asm: ppc64.AMODUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSD",
+ argLen: 2,
+ asm: ppc64.AMODSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUW",
+ argLen: 2,
+ asm: ppc64.AMODUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSW",
+ argLen: 2,
+ asm: ppc64.AMODSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FCTIDZ",
+ argLen: 1,
+ asm: ppc64.AFCTIDZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCTIWZ",
+ argLen: 1,
+ asm: ppc64.AFCTIWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCFID",
+ argLen: 1,
+ asm: ppc64.AFCFID,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCFIDS",
+ argLen: 1,
+ asm: ppc64.AFCFIDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FRSP",
+ argLen: 1,
+ asm: ppc64.AFRSP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MFVSRD",
+ argLen: 1,
+ asm: ppc64.AMFVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MTVSRD",
+ argLen: 1,
+ asm: ppc64.AMTVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDN",
+ argLen: 2,
+ asm: ppc64.AANDN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: ppc64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EQV",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AEQV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: ppc64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ asm: ppc64.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: ppc64.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: ppc64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FFLOOR",
+ argLen: 1,
+ asm: ppc64.AFRIM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCEIL",
+ argLen: 1,
+ asm: ppc64.AFRIP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FTRUNC",
+ argLen: 1,
+ asm: ppc64.AFRIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FROUND",
+ argLen: 1,
+ asm: ppc64.AFRIN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FABS",
+ argLen: 1,
+ asm: ppc64.AFABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FNABS",
+ argLen: 1,
+ asm: ppc64.AFNABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCPSGN",
+ argLen: 2,
+ asm: ppc64.AFCPSGN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCMPU",
+ argLen: 2,
+ asm: ppc64.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISEL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISELB",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2048}, // R11
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 2147483648, // R31
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ {1, 2048}, // R11
+ },
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ },
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredQuadMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoadPtr",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 576460746931312640, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 64}, // R6
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 32}, // R5
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: riscv.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ asm: riscv.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: riscv.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ asm: riscv.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULHU",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: riscv.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: riscv.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: riscv.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVUW",
+ argLen: 2,
+ asm: riscv.ADIVUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REM",
+ argLen: 2,
+ asm: riscv.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMU",
+ argLen: 2,
+ asm: riscv.AREMU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMW",
+ argLen: 2,
+ asm: riscv.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMUW",
+ argLen: 2,
+ asm: riscv.AREMUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymRdWr,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBconst",
+ auxType: auxInt8,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHconst",
+ auxType: auxInt16,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: riscv.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: riscv.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: riscv.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AXORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ANDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AANDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ asm: riscv.ANOT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SEQZ",
+ argLen: 1,
+ asm: riscv.ASEQZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SNEZ",
+ argLen: 1,
+ asm: riscv.ASNEZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLT",
+ argLen: 2,
+ asm: riscv.ASLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTU",
+ argLen: 2,
+ asm: riscv.ASLTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTIU",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTIU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVconvert",
+ argLen: 2,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 524288}, // X20
+ {0, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033972, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 512}, // X10
+ },
+ clobbers: 512, // X10
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1024}, // X11
+ {1, 512}, // X10
+ },
+ clobbers: 1536, // X10 X11
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 16, // X5
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ {2, 1006632884}, // X3 X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 112, // X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632950}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 524288}, // X20
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // X7
+ {1, 134217728}, // X28
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // X6
+ {1, 64}, // X7
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: riscv.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: riscv.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: riscv.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: riscv.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVSX",
+ argLen: 1,
+ asm: riscv.AFMVSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSW",
+ argLen: 1,
+ asm: riscv.AFCVTSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSL",
+ argLen: 1,
+ asm: riscv.AFCVTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWS",
+ argLen: 1,
+ asm: riscv.AFCVTWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLS",
+ argLen: 1,
+ asm: riscv.AFCVTLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FMOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNES",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTS",
+ argLen: 2,
+ asm: riscv.AFLTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLES",
+ argLen: 2,
+ asm: riscv.AFLES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: riscv.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: riscv.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: riscv.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: riscv.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVDX",
+ argLen: 1,
+ asm: riscv.AFMVDX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDW",
+ argLen: 1,
+ asm: riscv.AFCVTDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDL",
+ argLen: 1,
+ asm: riscv.AFCVTDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWD",
+ argLen: 1,
+ asm: riscv.AFCVTWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLD",
+ argLen: 1,
+ asm: riscv.AFCVTLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: riscv.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: riscv.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408758}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNED",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTD",
+ argLen: 2,
+ asm: riscv.AFLTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLED",
+ argLen: 2,
+ asm: riscv.AFLED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632948}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+
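+	// s390x opcode table entries begin here (the entries above cover riscv64).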
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LPDFR",
+ argLen: 1,
+ asm: s390x.ALPDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LNDFR",
+ argLen: 1,
+ asm: s390x.ALNDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CPSDR",
+ argLen: 2,
+ asm: s390x.ACPSDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FIDBR",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: s390x.AFIDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDC",
+ argLen: 2,
+ commutative: true,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDCconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDE",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AADDE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBC",
+ argLen: 2,
+ asm: s390x.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ASUBE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: s390x.ACEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FCMP",
+ argLen: 2,
+ asm: s390x.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTDBR",
+ argLen: 1,
+ asm: s390x.ALTDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTEBR",
+ argLen: 1,
+ asm: s390x.ALTEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLG",
+ argLen: 2,
+ asm: s390x.ARLLG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLL",
+ argLen: 2,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RXSBG",
+ auxType: auxS390XRotateParams,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ARXSBG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RISBGZ",
+ auxType: auxS390XRotateParams,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ARISBGZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOTW",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: s390x.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LOCGR",
+ auxType: auxS390XCCMask,
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ALOCGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LDGR",
+ argLen: 1,
+ asm: s390x.ALDGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LGDR",
+ argLen: 1,
+ asm: s390x.ALGDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CEFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CEGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLFEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLFDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CELFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CELGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LEDBR",
+ argLen: 1,
+ asm: s390x.ALEDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LDEBR",
+ argLen: 1,
+ asm: s390x.ALDEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymRead,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDaddridx",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymRead,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBR",
+ argLen: 1,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBR",
+ argLen: 1,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MVC",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymNone,
+ asm: s390x.AMVC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "CLEAR",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ACLEAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4096}, // R12
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4096}, // R12
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 4294918144, // R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagOV",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "SYNC",
+ argLen: 1,
+ asm: s390x.ASYNC,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAA",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LAAG",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAAG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "LAN",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LANfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LAO",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAOfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "FLOGR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFLOGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 2, // R1
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "POPCNT",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.APOPCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MLGR",
+ argLen: 2,
+ asm: s390x.AMLGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 8}, // R3
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "SumBytes2",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes4",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes8",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "STMG2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+
+ {
+ name: "LoweredStaticCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredClosureCall",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredInterCall",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredAddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredConvert",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "Select",
+ argLen: 3,
+ asm: wasm.ASelect,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Store8",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store16",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store32",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F32Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF32Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF32Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Const",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Const",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Const",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Eqz",
+ argLen: 1,
+ asm: wasm.AI64Eqz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Eq",
+ argLen: 2,
+ asm: wasm.AI64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Ne",
+ argLen: 2,
+ asm: wasm.AI64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtS",
+ argLen: 2,
+ asm: wasm.AI64LtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtU",
+ argLen: 2,
+ asm: wasm.AI64LtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtS",
+ argLen: 2,
+ asm: wasm.AI64GtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtU",
+ argLen: 2,
+ asm: wasm.AI64GtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeS",
+ argLen: 2,
+ asm: wasm.AI64LeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeU",
+ argLen: 2,
+ asm: wasm.AI64LeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeS",
+ argLen: 2,
+ asm: wasm.AI64GeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeU",
+ argLen: 2,
+ asm: wasm.AI64GeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Eq",
+ argLen: 2,
+ asm: wasm.AF32Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ne",
+ argLen: 2,
+ asm: wasm.AF32Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Lt",
+ argLen: 2,
+ asm: wasm.AF32Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Gt",
+ argLen: 2,
+ asm: wasm.AF32Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Le",
+ argLen: 2,
+ asm: wasm.AF32Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ge",
+ argLen: 2,
+ asm: wasm.AF32Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Eq",
+ argLen: 2,
+ asm: wasm.AF64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ne",
+ argLen: 2,
+ asm: wasm.AF64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Lt",
+ argLen: 2,
+ asm: wasm.AF64Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Gt",
+ argLen: 2,
+ asm: wasm.AF64Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Le",
+ argLen: 2,
+ asm: wasm.AF64Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ge",
+ argLen: 2,
+ asm: wasm.AF64Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Add",
+ argLen: 2,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64AddConst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Sub",
+ argLen: 2,
+ asm: wasm.AI64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Mul",
+ argLen: 2,
+ asm: wasm.AI64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivS",
+ argLen: 2,
+ asm: wasm.AI64DivS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivU",
+ argLen: 2,
+ asm: wasm.AI64DivU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemS",
+ argLen: 2,
+ asm: wasm.AI64RemS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemU",
+ argLen: 2,
+ asm: wasm.AI64RemU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64And",
+ argLen: 2,
+ asm: wasm.AI64And,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Or",
+ argLen: 2,
+ asm: wasm.AI64Or,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Xor",
+ argLen: 2,
+ asm: wasm.AI64Xor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Shl",
+ argLen: 2,
+ asm: wasm.AI64Shl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrS",
+ argLen: 2,
+ asm: wasm.AI64ShrS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrU",
+ argLen: 2,
+ asm: wasm.AI64ShrU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Neg",
+ argLen: 1,
+ asm: wasm.AF32Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Add",
+ argLen: 2,
+ asm: wasm.AF32Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Sub",
+ argLen: 2,
+ asm: wasm.AF32Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Mul",
+ argLen: 2,
+ asm: wasm.AF32Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Div",
+ argLen: 2,
+ asm: wasm.AF32Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Neg",
+ argLen: 1,
+ asm: wasm.AF64Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Add",
+ argLen: 2,
+ asm: wasm.AF64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Sub",
+ argLen: 2,
+ asm: wasm.AF64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Mul",
+ argLen: 2,
+ asm: wasm.AF64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Div",
+ argLen: 2,
+ asm: wasm.AF64Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32DemoteF64",
+ argLen: 1,
+ asm: wasm.AF32DemoteF64,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64PromoteF32",
+ argLen: 1,
+ asm: wasm.AF64PromoteF32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Extend8S",
+ argLen: 1,
+ asm: wasm.AI64Extend8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend16S",
+ argLen: 1,
+ asm: wasm.AI64Extend16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend32S",
+ argLen: 1,
+ asm: wasm.AI64Extend32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Sqrt",
+ argLen: 1,
+ asm: wasm.AF32Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Trunc",
+ argLen: 1,
+ asm: wasm.AF32Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Ceil",
+ argLen: 1,
+ asm: wasm.AF32Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Floor",
+ argLen: 1,
+ asm: wasm.AF32Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Nearest",
+ argLen: 1,
+ asm: wasm.AF32Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Abs",
+ argLen: 1,
+ asm: wasm.AF32Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Copysign",
+ argLen: 2,
+ asm: wasm.AF32Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Sqrt",
+ argLen: 1,
+ asm: wasm.AF64Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Trunc",
+ argLen: 1,
+ asm: wasm.AF64Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Ceil",
+ argLen: 1,
+ asm: wasm.AF64Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Floor",
+ argLen: 1,
+ asm: wasm.AF64Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Nearest",
+ argLen: 1,
+ asm: wasm.AF64Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Abs",
+ argLen: 1,
+ asm: wasm.AF64Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Copysign",
+ argLen: 2,
+ asm: wasm.AF64Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Ctz",
+ argLen: 1,
+ asm: wasm.AI64Ctz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Clz",
+ argLen: 1,
+ asm: wasm.AI64Clz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I32Rotl",
+ argLen: 2,
+ asm: wasm.AI32Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Rotl",
+ argLen: 2,
+ asm: wasm.AI64Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Popcnt",
+ argLen: 1,
+ asm: wasm.AI64Popcnt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+
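The integer in each inputInfo/outputInfo pair above is a register bitmask, and the trailing comment lists the registers its set bits select. A minimal, self-contained sketch of that mapping (the wasm register order — R0..R15, F0..F15, F16..F31, SP, g, SB — is assumed from the comments, not taken from this file):

	package main

	import "fmt"

	// decodeMask returns the names of the registers selected by mask,
	// given one architecture's register names in dense bit order.
	func decodeMask(mask uint64, names []string) []string {
		var regs []string
		for i, name := range names {
			if mask&(uint64(1)<<uint(i)) != 0 {
				regs = append(regs, name)
			}
		}
		return regs
	}

	func main() {
		// Assumed wasm ordering: R0..R15, F0..F15, F16..F31, SP, g, SB.
		var names []string
		for i := 0; i < 16; i++ {
			names = append(names, fmt.Sprintf("R%d", i))
		}
		for i := 0; i < 32; i++ {
			names = append(names, fmt.Sprintf("F%d", i))
		}
		names = append(names, "SP", "g", "SB")

		fmt.Println(decodeMask(65535, names))           // R0 ... R15
		fmt.Println(decodeMask(281474976776191, names)) // R0 ... R15 SP
		fmt.Println(decodeMask(281470681743360, names)) // F16 ... F31
	}

The entries that follow are the machine-independent ("generic") ops: they carry only a name, argLen, auxType and flags such as commutative or call, and no regInfo, since register constraints apply only to the architecture-specific ops they are eventually lowered to.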
+ {
+ name: "Add8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "AddPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Add32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SubPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mul8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Div32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Hmul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul32u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Avg32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Avg64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div128u",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Mod8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "And8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Lsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "EqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NeqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Neq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Less8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "CondSelect",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "AndB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "OrB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Not",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RotateLeft8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sqrt",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Floor",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ceil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RoundToEven",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Abs",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Copysign",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "FMA",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Phi",
+ argLen: -1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Copy",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Convert",
+ argLen: 2,
+ resultInArg0: true,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "ConstBool",
+ auxType: auxBool,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstString",
+ auxType: auxString,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstNil",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const8",
+ auxType: auxInt8,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const16",
+ auxType: auxInt16,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32",
+ auxType: auxInt32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64",
+ auxType: auxInt64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32F",
+ auxType: auxFloat32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64F",
+ auxType: auxFloat64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstInterface",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstSlice",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "InitMem",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Arg",
+ auxType: auxSymOff,
+ argLen: 0,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "Addr",
+ auxType: auxSym,
+ argLen: 1,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "LocalAddr",
+ auxType: auxSym,
+ argLen: 2,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "SP",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SB",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Load",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Dereference",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Store",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Move",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Zero",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StoreWB",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "MoveWB",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "ZeroWB",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "WB",
+ auxType: auxSym,
+ argLen: 3,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "HasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "PanicBounds",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "PanicExtend",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureCall",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterCall",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "SignExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc16to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "CvtBoolToUint8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsNonNil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "IsSliceInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NilCheck",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "GetG",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "GetClosurePtr",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerPC",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerSP",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "PtrIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "OffPtr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceMake",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "SlicePtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceCap",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ComplexReal",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexImag",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StringPtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ITab",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IData",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "StructMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake2",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StructMake3",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "StructMake4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "StructSelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArrayMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ArrayMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArraySelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StoreReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "LoadReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "FwdRef",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "Unknown",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "VarDef",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "VarKill",
+ auxType: auxSym,
+ argLen: 1,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "VarLive",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "KeepAlive",
+ argLen: 2,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "InlMark",
+ auxType: auxInt32,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Make",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Int64Hi",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Lo",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Add32carry",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32withcarry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub32carry",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32withcarry",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Add64carry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub64borrow",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Signmask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Zeromask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Slicemask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SpectreIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SpectreSliceIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Select0",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Select1",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SelectN",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SelectNAddr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "MakeResult",
+ argLen: -1,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicStore8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStorePtrNoWB",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwapRel32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "Clobber",
+ auxType: auxSymOff,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+}
+
+func (o Op) Asm() obj.As { return opcodeTable[o].asm }
+func (o Op) Scale() int16 { return int16(opcodeTable[o].scale) }
+func (o Op) String() string { return opcodeTable[o].name }
+func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch }
+func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }
+func (o Op) IsCall() bool { return opcodeTable[o].call }
+func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }
+func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }
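These one-line accessors give the rest of the package a convenient way to query the table by Op value. Illustrative fragment only (v stands for an *ssa.Value from this package, whose Op field indexes opcodeTable; not a complete program):

	if v.Op.IsCall() || v.Op.HasSideEffects() {
		// treat v as a barrier: keep memory operations on their own side of it
	}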
+
+var registers386 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_X0, -1, "X0"},
+ {9, x86.REG_X1, -1, "X1"},
+ {10, x86.REG_X2, -1, "X2"},
+ {11, x86.REG_X3, -1, "X3"},
+ {12, x86.REG_X4, -1, "X4"},
+ {13, x86.REG_X5, -1, "X5"},
+ {14, x86.REG_X6, -1, "X6"},
+ {15, x86.REG_X7, -1, "X7"},
+ {16, 0, -1, "SB"},
+}
+var gpRegMask386 = regMask(239)
+var fpRegMask386 = regMask(65280)
+var specialRegMask386 = regMask(0)
+var framepointerReg386 = int8(5)
+var linkReg386 = int8(-1)
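In registers386 above (and in the per-architecture tables that follow), each Register entry appears to be {dense index, register number from cmd/internal/obj/<arch>, an index used for GC register maps or -1 for registers not tracked there, name}; SP, SB and the floating-point registers all carry -1 in the third field. The mask variables then select table entries by dense index: gpRegMask386 = 239 = 0b11101111 picks indices 0-3 and 5-7 (AX, CX, DX, BX, BP, SI, DI — SP at index 4 is excluded), and fpRegMask386 = 65280 = 0xFF00 picks indices 8-15 (X0-X7). The same decoding applies to the AMD64, ARM, ARM64 and MIPS masks below.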
+var registersAMD64 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_R8, 7, "R8"},
+ {9, x86.REG_R9, 8, "R9"},
+ {10, x86.REG_R10, 9, "R10"},
+ {11, x86.REG_R11, 10, "R11"},
+ {12, x86.REG_R12, 11, "R12"},
+ {13, x86.REG_R13, 12, "R13"},
+ {14, x86.REG_R14, 13, "R14"},
+ {15, x86.REG_R15, 14, "R15"},
+ {16, x86.REG_X0, -1, "X0"},
+ {17, x86.REG_X1, -1, "X1"},
+ {18, x86.REG_X2, -1, "X2"},
+ {19, x86.REG_X3, -1, "X3"},
+ {20, x86.REG_X4, -1, "X4"},
+ {21, x86.REG_X5, -1, "X5"},
+ {22, x86.REG_X6, -1, "X6"},
+ {23, x86.REG_X7, -1, "X7"},
+ {24, x86.REG_X8, -1, "X8"},
+ {25, x86.REG_X9, -1, "X9"},
+ {26, x86.REG_X10, -1, "X10"},
+ {27, x86.REG_X11, -1, "X11"},
+ {28, x86.REG_X12, -1, "X12"},
+ {29, x86.REG_X13, -1, "X13"},
+ {30, x86.REG_X14, -1, "X14"},
+ {31, x86.REG_X15, -1, "X15"},
+ {32, 0, -1, "SB"},
+}
+var gpRegMaskAMD64 = regMask(65519)
+var fpRegMaskAMD64 = regMask(4294901760)
+var specialRegMaskAMD64 = regMask(0)
+var framepointerRegAMD64 = int8(5)
+var linkRegAMD64 = int8(-1)
+var registersARM = [...]Register{
+ {0, arm.REG_R0, 0, "R0"},
+ {1, arm.REG_R1, 1, "R1"},
+ {2, arm.REG_R2, 2, "R2"},
+ {3, arm.REG_R3, 3, "R3"},
+ {4, arm.REG_R4, 4, "R4"},
+ {5, arm.REG_R5, 5, "R5"},
+ {6, arm.REG_R6, 6, "R6"},
+ {7, arm.REG_R7, 7, "R7"},
+ {8, arm.REG_R8, 8, "R8"},
+ {9, arm.REG_R9, 9, "R9"},
+ {10, arm.REGG, -1, "g"},
+ {11, arm.REG_R11, -1, "R11"},
+ {12, arm.REG_R12, 10, "R12"},
+ {13, arm.REGSP, -1, "SP"},
+ {14, arm.REG_R14, 11, "R14"},
+ {15, arm.REG_R15, -1, "R15"},
+ {16, arm.REG_F0, -1, "F0"},
+ {17, arm.REG_F1, -1, "F1"},
+ {18, arm.REG_F2, -1, "F2"},
+ {19, arm.REG_F3, -1, "F3"},
+ {20, arm.REG_F4, -1, "F4"},
+ {21, arm.REG_F5, -1, "F5"},
+ {22, arm.REG_F6, -1, "F6"},
+ {23, arm.REG_F7, -1, "F7"},
+ {24, arm.REG_F8, -1, "F8"},
+ {25, arm.REG_F9, -1, "F9"},
+ {26, arm.REG_F10, -1, "F10"},
+ {27, arm.REG_F11, -1, "F11"},
+ {28, arm.REG_F12, -1, "F12"},
+ {29, arm.REG_F13, -1, "F13"},
+ {30, arm.REG_F14, -1, "F14"},
+ {31, arm.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var gpRegMaskARM = regMask(21503)
+var fpRegMaskARM = regMask(4294901760)
+var specialRegMaskARM = regMask(0)
+var framepointerRegARM = int8(-1)
+var linkRegARM = int8(14)
+var registersARM64 = [...]Register{
+ {0, arm64.REG_R0, 0, "R0"},
+ {1, arm64.REG_R1, 1, "R1"},
+ {2, arm64.REG_R2, 2, "R2"},
+ {3, arm64.REG_R3, 3, "R3"},
+ {4, arm64.REG_R4, 4, "R4"},
+ {5, arm64.REG_R5, 5, "R5"},
+ {6, arm64.REG_R6, 6, "R6"},
+ {7, arm64.REG_R7, 7, "R7"},
+ {8, arm64.REG_R8, 8, "R8"},
+ {9, arm64.REG_R9, 9, "R9"},
+ {10, arm64.REG_R10, 10, "R10"},
+ {11, arm64.REG_R11, 11, "R11"},
+ {12, arm64.REG_R12, 12, "R12"},
+ {13, arm64.REG_R13, 13, "R13"},
+ {14, arm64.REG_R14, 14, "R14"},
+ {15, arm64.REG_R15, 15, "R15"},
+ {16, arm64.REG_R16, 16, "R16"},
+ {17, arm64.REG_R17, 17, "R17"},
+ {18, arm64.REG_R18, -1, "R18"},
+ {19, arm64.REG_R19, 18, "R19"},
+ {20, arm64.REG_R20, 19, "R20"},
+ {21, arm64.REG_R21, 20, "R21"},
+ {22, arm64.REG_R22, 21, "R22"},
+ {23, arm64.REG_R23, 22, "R23"},
+ {24, arm64.REG_R24, 23, "R24"},
+ {25, arm64.REG_R25, 24, "R25"},
+ {26, arm64.REG_R26, 25, "R26"},
+ {27, arm64.REGG, -1, "g"},
+ {28, arm64.REG_R29, -1, "R29"},
+ {29, arm64.REG_R30, 26, "R30"},
+ {30, arm64.REGSP, -1, "SP"},
+ {31, arm64.REG_F0, -1, "F0"},
+ {32, arm64.REG_F1, -1, "F1"},
+ {33, arm64.REG_F2, -1, "F2"},
+ {34, arm64.REG_F3, -1, "F3"},
+ {35, arm64.REG_F4, -1, "F4"},
+ {36, arm64.REG_F5, -1, "F5"},
+ {37, arm64.REG_F6, -1, "F6"},
+ {38, arm64.REG_F7, -1, "F7"},
+ {39, arm64.REG_F8, -1, "F8"},
+ {40, arm64.REG_F9, -1, "F9"},
+ {41, arm64.REG_F10, -1, "F10"},
+ {42, arm64.REG_F11, -1, "F11"},
+ {43, arm64.REG_F12, -1, "F12"},
+ {44, arm64.REG_F13, -1, "F13"},
+ {45, arm64.REG_F14, -1, "F14"},
+ {46, arm64.REG_F15, -1, "F15"},
+ {47, arm64.REG_F16, -1, "F16"},
+ {48, arm64.REG_F17, -1, "F17"},
+ {49, arm64.REG_F18, -1, "F18"},
+ {50, arm64.REG_F19, -1, "F19"},
+ {51, arm64.REG_F20, -1, "F20"},
+ {52, arm64.REG_F21, -1, "F21"},
+ {53, arm64.REG_F22, -1, "F22"},
+ {54, arm64.REG_F23, -1, "F23"},
+ {55, arm64.REG_F24, -1, "F24"},
+ {56, arm64.REG_F25, -1, "F25"},
+ {57, arm64.REG_F26, -1, "F26"},
+ {58, arm64.REG_F27, -1, "F27"},
+ {59, arm64.REG_F28, -1, "F28"},
+ {60, arm64.REG_F29, -1, "F29"},
+ {61, arm64.REG_F30, -1, "F30"},
+ {62, arm64.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var gpRegMaskARM64 = regMask(670826495)
+var fpRegMaskARM64 = regMask(9223372034707292160)
+var specialRegMaskARM64 = regMask(0)
+var framepointerRegARM64 = int8(-1)
+var linkRegARM64 = int8(29)
+var registersMIPS = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REG_R28, 24, "R28"},
+ {26, mips.REGSP, -1, "SP"},
+ {27, mips.REGG, -1, "g"},
+ {28, mips.REG_R31, 25, "R31"},
+ {29, mips.REG_F0, -1, "F0"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F4, -1, "F4"},
+ {32, mips.REG_F6, -1, "F6"},
+ {33, mips.REG_F8, -1, "F8"},
+ {34, mips.REG_F10, -1, "F10"},
+ {35, mips.REG_F12, -1, "F12"},
+ {36, mips.REG_F14, -1, "F14"},
+ {37, mips.REG_F16, -1, "F16"},
+ {38, mips.REG_F18, -1, "F18"},
+ {39, mips.REG_F20, -1, "F20"},
+ {40, mips.REG_F22, -1, "F22"},
+ {41, mips.REG_F24, -1, "F24"},
+ {42, mips.REG_F26, -1, "F26"},
+ {43, mips.REG_F28, -1, "F28"},
+ {44, mips.REG_F30, -1, "F30"},
+ {45, mips.REG_HI, -1, "HI"},
+ {46, mips.REG_LO, -1, "LO"},
+ {47, 0, -1, "SB"},
+}
+var gpRegMaskMIPS = regMask(335544318)
+var fpRegMaskMIPS = regMask(35183835217920)
+var specialRegMaskMIPS = regMask(105553116266496)
+var framepointerRegMIPS = int8(-1)
+var linkRegMIPS = int8(28)
+var registersMIPS64 = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REGSP, -1, "SP"},
+ {26, mips.REGG, -1, "g"},
+ {27, mips.REG_R31, 24, "R31"},
+ {28, mips.REG_F0, -1, "F0"},
+ {29, mips.REG_F1, -1, "F1"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F3, -1, "F3"},
+ {32, mips.REG_F4, -1, "F4"},
+ {33, mips.REG_F5, -1, "F5"},
+ {34, mips.REG_F6, -1, "F6"},
+ {35, mips.REG_F7, -1, "F7"},
+ {36, mips.REG_F8, -1, "F8"},
+ {37, mips.REG_F9, -1, "F9"},
+ {38, mips.REG_F10, -1, "F10"},
+ {39, mips.REG_F11, -1, "F11"},
+ {40, mips.REG_F12, -1, "F12"},
+ {41, mips.REG_F13, -1, "F13"},
+ {42, mips.REG_F14, -1, "F14"},
+ {43, mips.REG_F15, -1, "F15"},
+ {44, mips.REG_F16, -1, "F16"},
+ {45, mips.REG_F17, -1, "F17"},
+ {46, mips.REG_F18, -1, "F18"},
+ {47, mips.REG_F19, -1, "F19"},
+ {48, mips.REG_F20, -1, "F20"},
+ {49, mips.REG_F21, -1, "F21"},
+ {50, mips.REG_F22, -1, "F22"},
+ {51, mips.REG_F23, -1, "F23"},
+ {52, mips.REG_F24, -1, "F24"},
+ {53, mips.REG_F25, -1, "F25"},
+ {54, mips.REG_F26, -1, "F26"},
+ {55, mips.REG_F27, -1, "F27"},
+ {56, mips.REG_F28, -1, "F28"},
+ {57, mips.REG_F29, -1, "F29"},
+ {58, mips.REG_F30, -1, "F30"},
+ {59, mips.REG_F31, -1, "F31"},
+ {60, mips.REG_HI, -1, "HI"},
+ {61, mips.REG_LO, -1, "LO"},
+ {62, 0, -1, "SB"},
+}
+var gpRegMaskMIPS64 = regMask(167772158)
+var fpRegMaskMIPS64 = regMask(1152921504338411520)
+var specialRegMaskMIPS64 = regMask(3458764513820540928)
+var framepointerRegMIPS64 = int8(-1)
+var linkRegMIPS64 = int8(27)
+var registersPPC64 = [...]Register{
+ {0, ppc64.REG_R0, -1, "R0"},
+ {1, ppc64.REGSP, -1, "SP"},
+ {2, 0, -1, "SB"},
+ {3, ppc64.REG_R3, 0, "R3"},
+ {4, ppc64.REG_R4, 1, "R4"},
+ {5, ppc64.REG_R5, 2, "R5"},
+ {6, ppc64.REG_R6, 3, "R6"},
+ {7, ppc64.REG_R7, 4, "R7"},
+ {8, ppc64.REG_R8, 5, "R8"},
+ {9, ppc64.REG_R9, 6, "R9"},
+ {10, ppc64.REG_R10, 7, "R10"},
+ {11, ppc64.REG_R11, 8, "R11"},
+ {12, ppc64.REG_R12, 9, "R12"},
+ {13, ppc64.REG_R13, -1, "R13"},
+ {14, ppc64.REG_R14, 10, "R14"},
+ {15, ppc64.REG_R15, 11, "R15"},
+ {16, ppc64.REG_R16, 12, "R16"},
+ {17, ppc64.REG_R17, 13, "R17"},
+ {18, ppc64.REG_R18, 14, "R18"},
+ {19, ppc64.REG_R19, 15, "R19"},
+ {20, ppc64.REG_R20, 16, "R20"},
+ {21, ppc64.REG_R21, 17, "R21"},
+ {22, ppc64.REG_R22, 18, "R22"},
+ {23, ppc64.REG_R23, 19, "R23"},
+ {24, ppc64.REG_R24, 20, "R24"},
+ {25, ppc64.REG_R25, 21, "R25"},
+ {26, ppc64.REG_R26, 22, "R26"},
+ {27, ppc64.REG_R27, 23, "R27"},
+ {28, ppc64.REG_R28, 24, "R28"},
+ {29, ppc64.REG_R29, 25, "R29"},
+ {30, ppc64.REGG, -1, "g"},
+ {31, ppc64.REG_R31, -1, "R31"},
+ {32, ppc64.REG_F0, -1, "F0"},
+ {33, ppc64.REG_F1, -1, "F1"},
+ {34, ppc64.REG_F2, -1, "F2"},
+ {35, ppc64.REG_F3, -1, "F3"},
+ {36, ppc64.REG_F4, -1, "F4"},
+ {37, ppc64.REG_F5, -1, "F5"},
+ {38, ppc64.REG_F6, -1, "F6"},
+ {39, ppc64.REG_F7, -1, "F7"},
+ {40, ppc64.REG_F8, -1, "F8"},
+ {41, ppc64.REG_F9, -1, "F9"},
+ {42, ppc64.REG_F10, -1, "F10"},
+ {43, ppc64.REG_F11, -1, "F11"},
+ {44, ppc64.REG_F12, -1, "F12"},
+ {45, ppc64.REG_F13, -1, "F13"},
+ {46, ppc64.REG_F14, -1, "F14"},
+ {47, ppc64.REG_F15, -1, "F15"},
+ {48, ppc64.REG_F16, -1, "F16"},
+ {49, ppc64.REG_F17, -1, "F17"},
+ {50, ppc64.REG_F18, -1, "F18"},
+ {51, ppc64.REG_F19, -1, "F19"},
+ {52, ppc64.REG_F20, -1, "F20"},
+ {53, ppc64.REG_F21, -1, "F21"},
+ {54, ppc64.REG_F22, -1, "F22"},
+ {55, ppc64.REG_F23, -1, "F23"},
+ {56, ppc64.REG_F24, -1, "F24"},
+ {57, ppc64.REG_F25, -1, "F25"},
+ {58, ppc64.REG_F26, -1, "F26"},
+ {59, ppc64.REG_F27, -1, "F27"},
+ {60, ppc64.REG_F28, -1, "F28"},
+ {61, ppc64.REG_F29, -1, "F29"},
+ {62, ppc64.REG_F30, -1, "F30"},
+ {63, ppc64.REG_F31, -1, "F31"},
+}
+var gpRegMaskPPC64 = regMask(1073733624)
+var fpRegMaskPPC64 = regMask(576460743713488896)
+var specialRegMaskPPC64 = regMask(0)
+var framepointerRegPPC64 = int8(1)
+var linkRegPPC64 = int8(-1)
+var registersRISCV64 = [...]Register{
+ {0, riscv.REG_X0, -1, "X0"},
+ {1, riscv.REGSP, -1, "SP"},
+ {2, riscv.REG_X3, 0, "X3"},
+ {3, riscv.REG_X4, -1, "X4"},
+ {4, riscv.REG_X5, 1, "X5"},
+ {5, riscv.REG_X6, 2, "X6"},
+ {6, riscv.REG_X7, 3, "X7"},
+ {7, riscv.REG_X8, 4, "X8"},
+ {8, riscv.REG_X9, 5, "X9"},
+ {9, riscv.REG_X10, 6, "X10"},
+ {10, riscv.REG_X11, 7, "X11"},
+ {11, riscv.REG_X12, 8, "X12"},
+ {12, riscv.REG_X13, 9, "X13"},
+ {13, riscv.REG_X14, 10, "X14"},
+ {14, riscv.REG_X15, 11, "X15"},
+ {15, riscv.REG_X16, 12, "X16"},
+ {16, riscv.REG_X17, 13, "X17"},
+ {17, riscv.REG_X18, 14, "X18"},
+ {18, riscv.REG_X19, 15, "X19"},
+ {19, riscv.REG_X20, 16, "X20"},
+ {20, riscv.REG_X21, 17, "X21"},
+ {21, riscv.REG_X22, 18, "X22"},
+ {22, riscv.REG_X23, 19, "X23"},
+ {23, riscv.REG_X24, 20, "X24"},
+ {24, riscv.REG_X25, 21, "X25"},
+ {25, riscv.REG_X26, 22, "X26"},
+ {26, riscv.REGG, -1, "g"},
+ {27, riscv.REG_X28, 23, "X28"},
+ {28, riscv.REG_X29, 24, "X29"},
+ {29, riscv.REG_X30, 25, "X30"},
+ {30, riscv.REG_X31, -1, "X31"},
+ {31, riscv.REG_F0, -1, "F0"},
+ {32, riscv.REG_F1, -1, "F1"},
+ {33, riscv.REG_F2, -1, "F2"},
+ {34, riscv.REG_F3, -1, "F3"},
+ {35, riscv.REG_F4, -1, "F4"},
+ {36, riscv.REG_F5, -1, "F5"},
+ {37, riscv.REG_F6, -1, "F6"},
+ {38, riscv.REG_F7, -1, "F7"},
+ {39, riscv.REG_F8, -1, "F8"},
+ {40, riscv.REG_F9, -1, "F9"},
+ {41, riscv.REG_F10, -1, "F10"},
+ {42, riscv.REG_F11, -1, "F11"},
+ {43, riscv.REG_F12, -1, "F12"},
+ {44, riscv.REG_F13, -1, "F13"},
+ {45, riscv.REG_F14, -1, "F14"},
+ {46, riscv.REG_F15, -1, "F15"},
+ {47, riscv.REG_F16, -1, "F16"},
+ {48, riscv.REG_F17, -1, "F17"},
+ {49, riscv.REG_F18, -1, "F18"},
+ {50, riscv.REG_F19, -1, "F19"},
+ {51, riscv.REG_F20, -1, "F20"},
+ {52, riscv.REG_F21, -1, "F21"},
+ {53, riscv.REG_F22, -1, "F22"},
+ {54, riscv.REG_F23, -1, "F23"},
+ {55, riscv.REG_F24, -1, "F24"},
+ {56, riscv.REG_F25, -1, "F25"},
+ {57, riscv.REG_F26, -1, "F26"},
+ {58, riscv.REG_F27, -1, "F27"},
+ {59, riscv.REG_F28, -1, "F28"},
+ {60, riscv.REG_F29, -1, "F29"},
+ {61, riscv.REG_F30, -1, "F30"},
+ {62, riscv.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var gpRegMaskRISCV64 = regMask(1006632948)
+var fpRegMaskRISCV64 = regMask(9223372034707292160)
+var specialRegMaskRISCV64 = regMask(0)
+var framepointerRegRISCV64 = int8(-1)
+var linkRegRISCV64 = int8(0)
+var registersS390X = [...]Register{
+ {0, s390x.REG_R0, 0, "R0"},
+ {1, s390x.REG_R1, 1, "R1"},
+ {2, s390x.REG_R2, 2, "R2"},
+ {3, s390x.REG_R3, 3, "R3"},
+ {4, s390x.REG_R4, 4, "R4"},
+ {5, s390x.REG_R5, 5, "R5"},
+ {6, s390x.REG_R6, 6, "R6"},
+ {7, s390x.REG_R7, 7, "R7"},
+ {8, s390x.REG_R8, 8, "R8"},
+ {9, s390x.REG_R9, 9, "R9"},
+ {10, s390x.REG_R10, -1, "R10"},
+ {11, s390x.REG_R11, 10, "R11"},
+ {12, s390x.REG_R12, 11, "R12"},
+ {13, s390x.REGG, -1, "g"},
+ {14, s390x.REG_R14, 12, "R14"},
+ {15, s390x.REGSP, -1, "SP"},
+ {16, s390x.REG_F0, -1, "F0"},
+ {17, s390x.REG_F1, -1, "F1"},
+ {18, s390x.REG_F2, -1, "F2"},
+ {19, s390x.REG_F3, -1, "F3"},
+ {20, s390x.REG_F4, -1, "F4"},
+ {21, s390x.REG_F5, -1, "F5"},
+ {22, s390x.REG_F6, -1, "F6"},
+ {23, s390x.REG_F7, -1, "F7"},
+ {24, s390x.REG_F8, -1, "F8"},
+ {25, s390x.REG_F9, -1, "F9"},
+ {26, s390x.REG_F10, -1, "F10"},
+ {27, s390x.REG_F11, -1, "F11"},
+ {28, s390x.REG_F12, -1, "F12"},
+ {29, s390x.REG_F13, -1, "F13"},
+ {30, s390x.REG_F14, -1, "F14"},
+ {31, s390x.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var gpRegMaskS390X = regMask(23551)
+var fpRegMaskS390X = regMask(4294901760)
+var specialRegMaskS390X = regMask(0)
+var framepointerRegS390X = int8(-1)
+var linkRegS390X = int8(14)
+var registersWasm = [...]Register{
+ {0, wasm.REG_R0, 0, "R0"},
+ {1, wasm.REG_R1, 1, "R1"},
+ {2, wasm.REG_R2, 2, "R2"},
+ {3, wasm.REG_R3, 3, "R3"},
+ {4, wasm.REG_R4, 4, "R4"},
+ {5, wasm.REG_R5, 5, "R5"},
+ {6, wasm.REG_R6, 6, "R6"},
+ {7, wasm.REG_R7, 7, "R7"},
+ {8, wasm.REG_R8, 8, "R8"},
+ {9, wasm.REG_R9, 9, "R9"},
+ {10, wasm.REG_R10, 10, "R10"},
+ {11, wasm.REG_R11, 11, "R11"},
+ {12, wasm.REG_R12, 12, "R12"},
+ {13, wasm.REG_R13, 13, "R13"},
+ {14, wasm.REG_R14, 14, "R14"},
+ {15, wasm.REG_R15, 15, "R15"},
+ {16, wasm.REG_F0, -1, "F0"},
+ {17, wasm.REG_F1, -1, "F1"},
+ {18, wasm.REG_F2, -1, "F2"},
+ {19, wasm.REG_F3, -1, "F3"},
+ {20, wasm.REG_F4, -1, "F4"},
+ {21, wasm.REG_F5, -1, "F5"},
+ {22, wasm.REG_F6, -1, "F6"},
+ {23, wasm.REG_F7, -1, "F7"},
+ {24, wasm.REG_F8, -1, "F8"},
+ {25, wasm.REG_F9, -1, "F9"},
+ {26, wasm.REG_F10, -1, "F10"},
+ {27, wasm.REG_F11, -1, "F11"},
+ {28, wasm.REG_F12, -1, "F12"},
+ {29, wasm.REG_F13, -1, "F13"},
+ {30, wasm.REG_F14, -1, "F14"},
+ {31, wasm.REG_F15, -1, "F15"},
+ {32, wasm.REG_F16, -1, "F16"},
+ {33, wasm.REG_F17, -1, "F17"},
+ {34, wasm.REG_F18, -1, "F18"},
+ {35, wasm.REG_F19, -1, "F19"},
+ {36, wasm.REG_F20, -1, "F20"},
+ {37, wasm.REG_F21, -1, "F21"},
+ {38, wasm.REG_F22, -1, "F22"},
+ {39, wasm.REG_F23, -1, "F23"},
+ {40, wasm.REG_F24, -1, "F24"},
+ {41, wasm.REG_F25, -1, "F25"},
+ {42, wasm.REG_F26, -1, "F26"},
+ {43, wasm.REG_F27, -1, "F27"},
+ {44, wasm.REG_F28, -1, "F28"},
+ {45, wasm.REG_F29, -1, "F29"},
+ {46, wasm.REG_F30, -1, "F30"},
+ {47, wasm.REG_F31, -1, "F31"},
+ {48, wasm.REGSP, -1, "SP"},
+ {49, wasm.REGG, -1, "g"},
+ {50, 0, -1, "SB"},
+}
+var gpRegMaskWasm = regMask(65535)
+var fpRegMaskWasm = regMask(281474976645120)
+var fp32RegMaskWasm = regMask(4294901760)
+var fp64RegMaskWasm = regMask(281470681743360)
+var specialRegMaskWasm = regMask(0)
+var framepointerRegWasm = int8(-1)
+var linkRegWasm = int8(-1)
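+
+// Reader's note (illustrative, not part of the generated tables): each regMask value
+// is a bitmap indexed by the register numbers in the corresponding registers table
+// above. For example, gpRegMaskAMD64 = 65519 = 0xFFEF has bits 0-3 and 5-15 set,
+// i.e. every integer register AX..R15 except index 4 (SP), while
+// fpRegMaskAMD64 = 4294901760 = 0xFFFF0000 has bits 16-31 set, i.e. X0..X15.
+// A quick check: 65519&(1<<4) == 0 (SP excluded) while 65519&(1<<0) != 0 (AX included).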
diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go
new file mode 100644
index 0000000..128e614
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opt.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// machine-independent optimization
+func opt(f *Func) {
+ applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues)
+}
diff --git a/src/cmd/compile/internal/ssa/passbm_test.go b/src/cmd/compile/internal/ssa/passbm_test.go
new file mode 100644
index 0000000..3fd3eb5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/passbm_test.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+const (
+ blockCount = 1000
+ passCount = 15000
+)
+
+type passFunc func(*Func)
+
+func BenchmarkDSEPass(b *testing.B) { benchFnPass(b, dse, blockCount, genFunction) }
+func BenchmarkDSEPassBlock(b *testing.B) { benchFnBlock(b, dse, genFunction) }
+func BenchmarkCSEPass(b *testing.B) { benchFnPass(b, cse, blockCount, genFunction) }
+func BenchmarkCSEPassBlock(b *testing.B) { benchFnBlock(b, cse, genFunction) }
+func BenchmarkDeadcodePass(b *testing.B) { benchFnPass(b, deadcode, blockCount, genFunction) }
+func BenchmarkDeadcodePassBlock(b *testing.B) { benchFnBlock(b, deadcode, genFunction) }
+
+func multi(f *Func) {
+ cse(f)
+ dse(f)
+ deadcode(f)
+}
+func BenchmarkMultiPass(b *testing.B) { benchFnPass(b, multi, blockCount, genFunction) }
+func BenchmarkMultiPassBlock(b *testing.B) { benchFnBlock(b, multi, genFunction) }
+
+// benchFnPass runs passFunc b.N times across a single function.
+func benchFnPass(b *testing.B, fn passFunc, size int, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fn(fun.f)
+ b.StopTimer()
+ CheckFunc(fun.f)
+ b.StartTimer()
+ }
+}
+
+// benchFnBlock runs passFunc across a function with b.N blocks.
+func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(b.N)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < passCount; i++ {
+ fn(fun.f)
+ }
+ b.StopTimer()
+}
+
+func genFunction(size int) []bloc {
+ var blocs []bloc
+ elemType := types.Types[types.TINT64]
+ ptrType := elemType.PtrTo()
+
+ valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) }
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu(valn("store", 0, 4), OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.Types[types.TUINTPTR], 0, nil),
+ Goto(blockn(1)),
+ ),
+ )
+ for i := 1; i < size+1; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu(valn("v", i, 0), OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("zero", i, 1), OpZero, types.TypeMem, 8, elemType, valn("addr", i, 3),
+ valn("store", i-1, 4)),
+ Valu(valn("store", i, 1), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("zero", i, 1)),
+ Valu(valn("store", i, 2), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 2),
+ valn("v", i, 0), valn("store", i, 1)),
+ Valu(valn("store", i, 3), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("store", i, 2)),
+ Valu(valn("store", i, 4), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 3),
+ valn("v", i, 0), valn("store", i, 3)),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size+1), Goto("exit")),
+ Bloc("exit", Exit("store0-4")),
+ )
+
+ return blocs
+}
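+
+// Usage note (an assumed typical invocation, not part of this file): from a Go
+// checkout these benchmarks can be run with something like
+//
+//	go test -run=NONE -bench='DSE|CSE|Deadcode|Multi' cmd/compile/internal/ssa
+//
+// The *Pass variants measure b.N runs of a pass over a fixed-size generated function,
+// while the *PassBlock variants measure passCount runs over a function with b.N blocks.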
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
new file mode 100644
index 0000000..761cb7a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phielim eliminates redundant phi values from f.
+// A phi is redundant if its arguments are all equal. For
+// purposes of counting, ignore the phi itself. Both of
+// these phis are redundant:
+// v = phi(x,x,x)
+// v = phi(x,v,x,v)
+// We repeat this process to also catch situations like:
+// v = phi(x, phi(x, x), phi(x, v))
+// TODO: Can we also simplify cases like:
+// v = phi(v, w, x)
+// w = phi(v, w, x)
+// and would that be useful?
+func phielim(f *Func) {
+ for {
+ change := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ change = phielimValue(v) || change
+ }
+ }
+ if !change {
+ break
+ }
+ }
+}
+
+// phielimValue tries to convert the phi v to a copy.
+func phielimValue(v *Value) bool {
+ if v.Op != OpPhi {
+ return false
+ }
+
+ // If there are two distinct args of v which
+ // are not v itself, then the phi must remain.
+ // Otherwise, we can replace it with a copy.
+ var w *Value
+ for _, x := range v.Args {
+ if x == v {
+ continue
+ }
+ if x == w {
+ continue
+ }
+ if w != nil {
+ return false
+ }
+ w = x
+ }
+
+ if w == nil {
+ // v references only itself. It must be in
+ // a dead code loop. Don't bother modifying it.
+ return false
+ }
+ v.Op = OpCopy
+ v.SetArgs1(w)
+ f := v.Block.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "eliminated phi")
+ }
+ return true
+}
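+
+// Illustrative sketch (not part of the pass): given SSA of the form
+//
+//	v3 = Phi <int> v1 v1 v3
+//
+// phielimValue rewrites v3 in place to
+//
+//	v3 = Copy <int> v1
+//
+// since its only distinct non-self argument is v1; later copy-elimination and
+// dead-code passes are expected to clean up the resulting copy.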
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
new file mode 100644
index 0000000..db7b022
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phiopt eliminates boolean Phis based on the previous if.
+//
+// Main use case is to transform:
+//	x := false
+//	if b {
+//		x = true
+//	}
+// into x = b.
+//
+// In SSA code this appears as
+//
+// b0
+//   If b -> b1 b2
+// b1
+//   Plain -> b2
+// b2
+//   x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+//
+// In this case we can replace x with a copy of b.
+func phiopt(f *Func) {
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ if len(b.Preds) != 2 || len(b.Values) == 0 {
+ // TODO: handle more than 2 predecessors, e.g. a || b || c.
+ continue
+ }
+
+ pb0, b0 := b, b.Preds[0].b
+ for len(b0.Succs) == 1 && len(b0.Preds) == 1 {
+ pb0, b0 = b0, b0.Preds[0].b
+ }
+ if b0.Kind != BlockIf {
+ continue
+ }
+ pb1, b1 := b, b.Preds[1].b
+ for len(b1.Succs) == 1 && len(b1.Preds) == 1 {
+ pb1, b1 = b1, b1.Preds[0].b
+ }
+ if b1 != b0 {
+ continue
+ }
+ // b0 is the if block giving the boolean value.
+
+ // reverse is the predecessor from which the truth value comes.
+ var reverse int
+ if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 {
+ reverse = 0
+ } else if b0.Succs[0].b == pb1 && b0.Succs[1].b == pb0 {
+ reverse = 1
+ } else {
+ b.Fatalf("invalid predecessors\n")
+ }
+
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+
+ // Look for conversions from bool to 0/1.
+ if v.Type.IsInteger() {
+ phioptint(v, b0, reverse)
+ }
+
+ if !v.Type.IsBoolean() {
+ continue
+ }
+
+ // Replaces
+ // if a { x = true } else { x = false } with x = a
+ // and
+ // if a { x = false } else { x = true } with x = !a
+ if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
+ if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt {
+ ops := [2]Op{OpNot, OpCopy}
+ v.reset(ops[v.Args[reverse].AuxInt])
+ v.AddArg(b0.Controls[0])
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = true } else { x = value } with x = a || value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed, so evaluating it unconditionally in the
+ // rewritten expression introduces no new side effects.
+ if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
+ if tmp := v.Args[1-reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpOrB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = value } else { x = false } with x = a && value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed, so evaluating it unconditionally in the
+ // rewritten expression introduces no new side effects.
+ if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
+ if tmp := v.Args[reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpAndB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+ }
+ }
+}
+
+func phioptint(v *Value, b0 *Block, reverse int) {
+ a0 := v.Args[0]
+ a1 := v.Args[1]
+ if a0.Op != a1.Op {
+ return
+ }
+
+ switch a0.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ default:
+ return
+ }
+
+ negate := false
+ switch {
+ case a0.AuxInt == 0 && a1.AuxInt == 1:
+ negate = true
+ case a0.AuxInt == 1 && a1.AuxInt == 0:
+ default:
+ return
+ }
+
+ if reverse == 1 {
+ negate = !negate
+ }
+
+ a := b0.Controls[0]
+ if negate {
+ a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
+ }
+ v.AddArg(a)
+
+ cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, v.Block.Func.Config.Types.UInt8, a)
+ switch v.Type.Size() {
+ case 1:
+ v.reset(OpCopy)
+ case 2:
+ v.reset(OpZeroExt8to16)
+ case 4:
+ v.reset(OpZeroExt8to32)
+ case 8:
+ v.reset(OpZeroExt8to64)
+ default:
+ v.Fatalf("bad int size %d", v.Type.Size())
+ }
+ v.AddArg(cvt)
+
+ f := b0.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8)
+ }
+}
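+
+// Illustrative sketch (not part of the pass): phioptint handles the integer analogue
+// of the boolean case, e.g. Go code like
+//
+//	x := 0
+//	if b {
+//		x = 1
+//	}
+//
+// where x's phi has constant 0/1 arguments. The phi is rewritten to a
+// CvtBoolToUint8 of the branch condition (negated if the 0/1 arms are swapped),
+// zero-extended to the width of x.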
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
new file mode 100644
index 0000000..f5a2b3a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -0,0 +1,1359 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "os"
+)
+
+// If true, check poset integrity after every mutation
+var debugPoset = false
+
+const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64
+
+// bitset is a bit array for dense indexes.
+type bitset []uint
+
+func newBitset(n int) bitset {
+ return make(bitset, (n+uintSize-1)/uintSize)
+}
+
+func (bs bitset) Reset() {
+ for i := range bs {
+ bs[i] = 0
+ }
+}
+
+func (bs bitset) Set(idx uint32) {
+ bs[idx/uintSize] |= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Clear(idx uint32) {
+ bs[idx/uintSize] &^= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Test(idx uint32) bool {
+ return bs[idx/uintSize]&(1<<(idx%uintSize)) != 0
+}
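+
+// Worked example (illustrative): on a 64-bit platform uintSize is 64, so
+// newBitset(70) allocates two words ((70+63)/64 == 2); Set(65) sets bit 1 of
+// word 1, and Test(65) then reports true until Clear(65) is called.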
+
+type undoType uint8
+
+const (
+ undoInvalid undoType = iota
+ undoCheckpoint // a checkpoint to group undo passes
+ undoSetChl // change back left child of undo.idx to undo.edge
+ undoSetChr // change back right child of undo.idx to undo.edge
+ undoNonEqual // forget that SSA value undo.ID is non-equal to undo.idx (another ID)
+ undoNewNode // remove new node created for SSA value undo.ID
+ undoNewConstant // remove the constant node idx from the constants map
+ undoAliasNode // unalias SSA value undo.ID so that it points back to node index undo.idx
+ undoNewRoot // remove node undo.idx from root list
+ undoChangeRoot // remove node undo.idx from root list, and put back undo.edge.Target instead
+ undoMergeRoot // remove node undo.idx from root list, and put back its children instead
+)
+
+// posetUndo represents an undo pass to be performed.
+// It's a union of fields that can be used to store information,
+// and typ is the discriminant that specifies which kind
+// of operation must be performed. Not all fields are always used.
+type posetUndo struct {
+ typ undoType
+ idx uint32
+ ID ID
+ edge posetEdge
+}
+
+const (
+ // Make poset handle constants as unsigned numbers.
+ posetFlagUnsigned = 1 << iota
+)
+
+// A poset edge. The zero value is the null/empty edge.
+// Packs target node index (31 bits) and strict flag (1 bit).
+type posetEdge uint32
+
+func newedge(t uint32, strict bool) posetEdge {
+ s := uint32(0)
+ if strict {
+ s = 1
+ }
+ return posetEdge(t<<1 | s)
+}
+func (e posetEdge) Target() uint32 { return uint32(e) >> 1 }
+func (e posetEdge) Strict() bool { return uint32(e)&1 != 0 }
+func (e posetEdge) String() string {
+ s := fmt.Sprint(e.Target())
+ if e.Strict() {
+ s += "*"
+ }
+ return s
+}
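+
+// Worked example (illustrative): newedge(5, true) packs target 5 and the strict
+// bit into 5<<1|1 == 11, so Target() returns 11>>1 == 5, Strict() returns true,
+// and String() prints "5*". The zero value posetEdge(0) is the empty edge.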
+
+// posetNode is a node of a DAG within the poset.
+type posetNode struct {
+ l, r posetEdge
+}
+
+// poset is a union-find data structure that can represent a partially ordered set
+// of SSA values. Given a binary relation that creates a partial order (e.g. '<'),
+// clients can record relations between SSA values using SetOrder, and later
+// check relations (in the transitive closure) with Ordered. For instance,
+// if SetOrder is called to record that A<B and B<C, Ordered will later confirm
+// that A<C.
+//
+// It is possible to record equality relations between SSA values with SetEqual and check
+// equality with Equal. Equality propagates into the transitive closure for the partial
+// order so that if we know that A<B<C and later learn that A==D, Ordered will return
+// true for D<C.
+//
+// It is also possible to record inequality relations between nodes with SetNonEqual;
+// non-equality relations are not transitive, but they can still be useful: for instance
+// if we know that A<=B and later we learn that A!=B, we can deduce that A<B.
+// NonEqual can be used to check whether it is known that the nodes are different, either
+// because SetNonEqual was called before, or because we know that they are strictly ordered.
+//
+// poset will refuse to record new relations that contradict existing relations:
+// for instance if A<B<C, calling SetOrder for C<A will fail returning false; also
+// calling SetEqual for C==A will fail.
+//
+// poset is implemented as a forest of DAGs; in each DAG, if there is a path (directed)
+// from node A to B, it means that A<B (or A<=B). Equality is represented by mapping
+// two SSA values to the same DAG node; when a new equality relation is recorded
+// between two existing nodes, the nodes are merged, adjusting incoming and outgoing edges.
+//
+// Constants are specially treated. When a constant is added to the poset, it is
+// immediately linked to other constants already present; so for instance if the
+// poset knows that x<=3, and then x is tested against 5, 5 is first added and linked
+// to 3 (using 3<5), so that the poset knows that x<=3<5; at that point, it is able
+// to answer x<5 correctly. This means that all constants are always within the same
+// DAG; as an implementation detail, we enforce that the DAG containing the constants
+// is always the first in the forest.
+//
+// poset is designed to be memory efficient and to perform few allocations during normal usage.
+// Most internal data structures are pre-allocated and flat, so for instance adding a
+// new relation does not cause any allocation. For performance reasons,
+// each node has only up to two outgoing edges (like a binary tree), so intermediate
+// "dummy" nodes are required to represent more than two relations. For instance,
+// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
+// following DAG:
+//
+//       A
+//      / \
+//     I  dummy
+//         /  \
+//        J    K
+//
+type poset struct {
+ lastidx uint32 // last generated dense index
+ flags uint8 // internal flags
+ values map[ID]uint32 // map SSA values to dense indexes
+ constants map[int64]uint32 // record SSA constants together with their value
+ nodes []posetNode // nodes (in all DAGs)
+ roots []uint32 // list of root nodes (forest)
+ noneq map[uint32]bitset // non-equal relations
+ undo []posetUndo // undo chain
+}
+
+func newPoset() *poset {
+ return &poset{
+ values: make(map[ID]uint32),
+ constants: make(map[int64]uint32, 8),
+ nodes: make([]posetNode, 1, 16),
+ roots: make([]uint32, 0, 4),
+ noneq: make(map[uint32]bitset),
+ undo: make([]posetUndo, 0, 4),
+ }
+}
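+
+// Usage sketch (illustrative; the exact call sites live in the client pass):
+// a client typically brackets speculative work with Checkpoint/Undo, e.g.
+//
+//	po := newPoset()
+//	po.Checkpoint()
+//	if po.SetOrderOrEqual(a, b) && po.SetOrder(b, c) { // record a<=b, b<c
+//		_ = po.OrderedOrEqual(a, c) // now known: a<=c (in fact a<c)
+//	}
+//	po.Undo() // roll everything back to the checkpoint
+//
+// where a, b and c are *Value nodes.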
+
+func (po *poset) SetUnsigned(uns bool) {
+ if uns {
+ po.flags |= posetFlagUnsigned
+ } else {
+ po.flags &^= posetFlagUnsigned
+ }
+}
+
+// Handle children
+func (po *poset) setchl(i uint32, l posetEdge) { po.nodes[i].l = l }
+func (po *poset) setchr(i uint32, r posetEdge) { po.nodes[i].r = r }
+func (po *poset) chl(i uint32) uint32 { return po.nodes[i].l.Target() }
+func (po *poset) chr(i uint32) uint32 { return po.nodes[i].r.Target() }
+func (po *poset) children(i uint32) (posetEdge, posetEdge) {
+ return po.nodes[i].l, po.nodes[i].r
+}
+
+// upush records a new undo step. It can be used for simple
+// undo passes that record up to one index and one edge.
+func (po *poset) upush(typ undoType, p uint32, e posetEdge) {
+ po.undo = append(po.undo, posetUndo{typ: typ, idx: p, edge: e})
+}
+
+// upushnew pushes an undo pass for a new node
+func (po *poset) upushnew(id ID, idx uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewNode, ID: id, idx: idx})
+}
+
+// upushneq pushes a new undo pass for a nonequal relation
+func (po *poset) upushneq(idx1 uint32, idx2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNonEqual, ID: ID(idx1), idx: idx2})
+}
+
+// upushalias pushes a new undo pass for aliasing two nodes
+func (po *poset) upushalias(id ID, i2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoAliasNode, ID: id, idx: i2})
+}
+
+// upushconst pushes a new undo pass for a new constant
+func (po *poset) upushconst(idx uint32, old uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewConstant, idx: idx, ID: ID(old)})
+}
+
+// addchild adds i2 as direct child of i1.
+func (po *poset) addchild(i1, i2 uint32, strict bool) {
+ i1l, i1r := po.children(i1)
+ e2 := newedge(i2, strict)
+
+ if i1l == 0 {
+ po.setchl(i1, e2)
+ po.upush(undoSetChl, i1, 0)
+ } else if i1r == 0 {
+ po.setchr(i1, e2)
+ po.upush(undoSetChr, i1, 0)
+ } else {
+ // If n1 already has two children, add an intermediate dummy
+ // node to record the relation correctly (without relating
+ // n2 to other existing nodes). Use a non-deterministic value
+ // to decide whether to append on the left or the right, to avoid
+ // creating degenerate chains.
+ //
+ //      n1
+ //     /  \
+ //   i1l  dummy
+ //        /   \
+ //      i1r    n2
+ //
+ dummy := po.newnode(nil)
+ if (i1^i2)&1 != 0 { // non-deterministic
+ po.setchl(dummy, i1r)
+ po.setchr(dummy, e2)
+ po.setchr(i1, newedge(dummy, false))
+ po.upush(undoSetChr, i1, i1r)
+ } else {
+ po.setchl(dummy, i1l)
+ po.setchr(dummy, e2)
+ po.setchl(i1, newedge(dummy, false))
+ po.upush(undoSetChl, i1, i1l)
+ }
+ }
+}
+
+// newnode allocates a new node bound to SSA value n.
+// If n is nil, this is a dummy node (= only used internally).
+func (po *poset) newnode(n *Value) uint32 {
+ i := po.lastidx + 1
+ po.lastidx++
+ po.nodes = append(po.nodes, posetNode{})
+ if n != nil {
+ if po.values[n.ID] != 0 {
+ panic("newnode for Value already inserted")
+ }
+ po.values[n.ID] = i
+ po.upushnew(n.ID, i)
+ } else {
+ po.upushnew(0, i)
+ }
+ return i
+}
+
+// lookup searches for an SSA value in the forest of DAGs, and returns its node.
+// Constants are materialized on the fly during lookup.
+func (po *poset) lookup(n *Value) (uint32, bool) {
+ i, f := po.values[n.ID]
+ if !f && n.isGenericIntConst() {
+ po.newconst(n)
+ i, f = po.values[n.ID]
+ }
+ return i, f
+}
+
+// newconst creates a node for a constant. It links it to other constants, so
+// that n<=5 is detected true when n<=3 is known to be true.
+// TODO: this is O(N), fix it.
+func (po *poset) newconst(n *Value) {
+ if !n.isGenericIntConst() {
+ panic("newconst on non-constant")
+ }
+
+ // If the same constant is already present in the poset through a different
+ // Value, just alias to it without allocating a new node.
+ val := n.AuxInt
+ if po.flags&posetFlagUnsigned != 0 {
+ val = int64(n.AuxUnsigned())
+ }
+ if c, found := po.constants[val]; found {
+ po.values[n.ID] = c
+ po.upushalias(n.ID, 0)
+ return
+ }
+
+ // Create the new node for this constant
+ i := po.newnode(n)
+
+ // If this is the first constant, put it as a new root, as
+ // we can't record an existing connection so we don't have
+ // a specific DAG to add it to. Notice that we want all
+ // constants to be in root #0, so make sure the new root
+ // goes there.
+ if len(po.constants) == 0 {
+ idx := len(po.roots)
+ po.roots = append(po.roots, i)
+ po.roots[0], po.roots[idx] = po.roots[idx], po.roots[0]
+ po.upush(undoNewRoot, i, 0)
+ po.constants[val] = i
+ po.upushconst(i, 0)
+ return
+ }
+
+ // Find the lower and upper bound among existing constants. That is,
+ // find the higher constant that is lower than the one that we're adding,
+ // and the lower constant that is higher.
+ // The loop is duplicated to handle signed and unsigned comparison,
+ // depending on how the poset was configured.
+ var lowerptr, higherptr uint32
+
+ if po.flags&posetFlagUnsigned != 0 {
+ var lower, higher uint64
+ val1 := n.AuxUnsigned()
+ for val2, ptr := range po.constants {
+ val2 := uint64(val2)
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ } else {
+ var lower, higher int64
+ val1 := n.AuxInt
+ for val2, ptr := range po.constants {
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ }
+
+ if lowerptr == 0 && higherptr == 0 {
+ // This should not happen, as at least one
+ // other constant must exist if we get here.
+ panic("no constant found")
+ }
+
+ // Create the new node and connect it to the bounds, so that
+ // lower < n < higher. We could have found both bounds or only one
+ // of them, depending on what other constants are present in the poset.
+ // Notice that we always link constants together, so they
+ // are always part of the same DAG.
+ switch {
+ case lowerptr != 0 && higherptr != 0:
+ // Both bounds are present, record lower < n < higher.
+ po.addchild(lowerptr, i, true)
+ po.addchild(i, higherptr, true)
+
+ case lowerptr != 0:
+ // Lower bound only, record lower < n.
+ po.addchild(lowerptr, i, true)
+
+ case higherptr != 0:
+ // Higher bound only. To record n < higher, we need
+ // a dummy root:
+ //
+ //        dummy
+ //       /    \
+ //     root    \
+ //      /       n
+ //    ....     /
+ //       \    /
+ //       higher
+ //
+ i2 := higherptr
+ r2 := po.findroot(i2)
+ if r2 != po.roots[0] { // all constants should be in root #0
+ panic("constant not in root #0")
+ }
+ dummy := po.newnode(nil)
+ po.changeroot(r2, dummy)
+ po.upush(undoChangeRoot, dummy, newedge(r2, false))
+ po.addchild(dummy, r2, false)
+ po.addchild(dummy, i, false)
+ po.addchild(i, i2, true)
+ }
+
+ po.constants[val] = i
+ po.upushconst(i, 0)
+}
+
+// aliasnewnode records that a single node n2 (not in the poset yet) is an alias
+// of the master node n1.
+func (po *poset) aliasnewnode(n1, n2 *Value) {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if i1 == 0 || i2 != 0 {
+ panic("aliasnewnode invalid arguments")
+ }
+
+ po.values[n2.ID] = i1
+ po.upushalias(n2.ID, 0)
+}
+
+// aliasnodes records that all the nodes i2s are aliases of a single master node n1.
+// aliasnodes takes care of rearranging the DAG, changing references of parent/children
+// of nodes in i2s, so that they point to n1 instead.
+// Complexity is O(n) (with n being the total number of nodes in the poset, not just
+// the number of nodes being aliased).
+func (po *poset) aliasnodes(n1 *Value, i2s bitset) {
+ i1 := po.values[n1.ID]
+ if i1 == 0 {
+ panic("aliasnode for non-existing node")
+ }
+ if i2s.Test(i1) {
+ panic("aliasnode i2s contains n1 node")
+ }
+
+ // Go through all the nodes to adjust parent/children of nodes in i2s
+ for idx, n := range po.nodes {
+ // Do not touch i1 itself, otherwise we can create useless self-loops
+ if uint32(idx) == i1 {
+ continue
+ }
+ l, r := n.l, n.r
+
+ // Rename all references to i2s into i1
+ if i2s.Test(l.Target()) {
+ po.setchl(uint32(idx), newedge(i1, l.Strict()))
+ po.upush(undoSetChl, uint32(idx), l)
+ }
+ if i2s.Test(r.Target()) {
+ po.setchr(uint32(idx), newedge(i1, r.Strict()))
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+
+ // Connect all children of i2s to i1 (unless those children
+ // are in i2s as well, in which case it would be useless)
+ if i2s.Test(uint32(idx)) {
+ if l != 0 && !i2s.Test(l.Target()) {
+ po.addchild(i1, l.Target(), l.Strict())
+ }
+ if r != 0 && !i2s.Test(r.Target()) {
+ po.addchild(i1, r.Target(), r.Strict())
+ }
+ po.setchl(uint32(idx), 0)
+ po.setchr(uint32(idx), 0)
+ po.upush(undoSetChl, uint32(idx), l)
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+ }
+
+ // Reassign all existing IDs that point to i2 to i1.
+ // This includes n2.ID.
+ for k, v := range po.values {
+ if i2s.Test(v) {
+ po.values[k] = i1
+ po.upushalias(k, v)
+ }
+ }
+
+ // If one of the aliased nodes is a constant, then make sure
+ // po.constants is updated to point to the master node.
+ for val, idx := range po.constants {
+ if i2s.Test(idx) {
+ po.constants[val] = i1
+ po.upushconst(i1, idx)
+ }
+ }
+}
+
+func (po *poset) isroot(r uint32) bool {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (po *poset) changeroot(oldr, newr uint32) {
+ for i := range po.roots {
+ if po.roots[i] == oldr {
+ po.roots[i] = newr
+ return
+ }
+ }
+ panic("changeroot on non-root")
+}
+
+func (po *poset) removeroot(r uint32) {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ po.roots = append(po.roots[:i], po.roots[i+1:]...)
+ return
+ }
+ }
+ panic("removeroot on non-root")
+}
+
+// dfs performs a depth-first search within the DAG whose root is r.
+// f is the visit function called for each node; if it returns true,
+// the search is aborted and true is returned. The root node is
+// visited too.
+// If strict, ignore edges across a path until at least one
+// strict edge is found. For instance, for a chain A<=B<=C<D<=E<F,
+// a strict walk visits D,E,F.
+// If the visit ends, false is returned.
+func (po *poset) dfs(r uint32, strict bool, f func(i uint32) bool) bool {
+ closed := newBitset(int(po.lastidx + 1))
+ open := make([]uint32, 1, 64)
+ open[0] = r
+
+ if strict {
+ // Do a first DFS; walk all paths and stop when we find a strict
+ // edge, building a "next" list of nodes reachable through strict
+ // edges. This will be the bootstrap open list for the real DFS.
+ next := make([]uint32, 0, 64)
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ // Don't visit the same node twice. Notice that all nodes
+ // across non-strict paths are still visited at least once, so
+ // a non-strict path can never obscure a strict path to the
+ // same node.
+ if !closed.Test(i) {
+ closed.Set(i)
+
+ l, r := po.children(i)
+ if l != 0 {
+ if l.Strict() {
+ next = append(next, l.Target())
+ } else {
+ open = append(open, l.Target())
+ }
+ }
+ if r != 0 {
+ if r.Strict() {
+ next = append(next, r.Target())
+ } else {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ }
+ open = next
+ closed.Reset()
+ }
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ if !closed.Test(i) {
+ if f(i) {
+ return true
+ }
+ closed.Set(i)
+ l, r := po.children(i)
+ if l != 0 {
+ open = append(open, l.Target())
+ }
+ if r != 0 {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ return false
+}
+
+// Returns true if there is a path from i1 to i2.
+// If strict == true: if the function returns true, then i1 < i2.
+// If strict == false: if the function returns true, then i1 <= i2.
+// If the function returns false, no relation is known.
+func (po *poset) reaches(i1, i2 uint32, strict bool) bool {
+ return po.dfs(i1, strict, func(n uint32) bool {
+ return n == i2
+ })
+}
+
+// findroot finds i's root, that is which DAG contains i.
+// Returns the root; if i is itself a root, it is returned.
+// Panics if i is not in any DAG.
+func (po *poset) findroot(i uint32) uint32 {
+ // TODO(rasky): if needed, a way to speed up this search is
+ // storing a bitset for each root using it as a mini bloom filter
+ // of nodes present under that root.
+ for _, r := range po.roots {
+ if po.reaches(r, i, false) {
+ return r
+ }
+ }
+ panic("findroot didn't find any root")
+}
+
+// mergeroot merges two DAGs into one DAG by creating a new dummy root
+func (po *poset) mergeroot(r1, r2 uint32) uint32 {
+ // Root #0 is special as it contains all constants. Since mergeroot
+ // discards r2 as root and keeps r1, make sure that r2 is not root #0,
+ // otherwise constants would move to a different root.
+ if r2 == po.roots[0] {
+ r1, r2 = r2, r1
+ }
+ r := po.newnode(nil)
+ po.setchl(r, newedge(r1, false))
+ po.setchr(r, newedge(r2, false))
+ po.changeroot(r1, r)
+ po.removeroot(r2)
+ po.upush(undoMergeRoot, r, 0)
+ return r
+}
+
+// collapsepath marks n1 and n2 as equal and collapses as equal all
+// nodes across all paths between n1 and n2. If a strict edge is
+// found, the function does not modify the DAG and returns false.
+// Complexity is O(n).
+func (po *poset) collapsepath(n1, n2 *Value) bool {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if po.reaches(i1, i2, true) {
+ return false
+ }
+
+ // Find all the paths from i1 to i2
+ paths := po.findpaths(i1, i2)
+ // Mark all nodes in all the paths as aliases of n1
+ // (excluding n1 itself)
+ paths.Clear(i1)
+ po.aliasnodes(n1, paths)
+ return true
+}
+
+// findpaths is a recursive function that calculates all paths from cur to dst
+// and returns them as a bitset (the index of a node is set in the bitset if
+// that node is on at least one path from cur to dst).
+// We do a DFS from cur (stopping the descent whenever we reach dst, if ever),
+// and mark as part of the paths any node that has a child which is already
+// part of the path (or is dst itself).
+func (po *poset) findpaths(cur, dst uint32) bitset {
+ seen := newBitset(int(po.lastidx + 1))
+ path := newBitset(int(po.lastidx + 1))
+ path.Set(dst)
+ po.findpaths1(cur, dst, seen, path)
+ return path
+}
+
+func (po *poset) findpaths1(cur, dst uint32, seen bitset, path bitset) {
+ if cur == dst {
+ return
+ }
+ seen.Set(cur)
+ l, r := po.chl(cur), po.chr(cur)
+ if !seen.Test(l) {
+ po.findpaths1(l, dst, seen, path)
+ }
+ if !seen.Test(r) {
+ po.findpaths1(r, dst, seen, path)
+ }
+ if path.Test(l) || path.Test(r) {
+ path.Set(cur)
+ }
+}
+
+// Check whether it is recorded that i1!=i2
+func (po *poset) isnoneq(i1, i2 uint32) bool {
+ if i1 == i2 {
+ return false
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+
+ // Check if we recorded a non-equal relation before
+ if bs, ok := po.noneq[i1]; ok && bs.Test(i2) {
+ return true
+ }
+ return false
+}
+
+// Record that i1!=i2
+func (po *poset) setnoneq(n1, n2 *Value) {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If any of the nodes do not exist in the poset, allocate them. Since
+ // we don't know any relation (in the partial order) about them, they must
+ // become independent roots.
+ if !f1 {
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ }
+ if !f2 {
+ i2 = po.newnode(n2)
+ po.roots = append(po.roots, i2)
+ po.upush(undoNewRoot, i2, 0)
+ }
+
+ if i1 == i2 {
+ panic("setnoneq on same node")
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+ bs := po.noneq[i1]
+ if bs == nil {
+ // Given that we record non-equality relations using the
+ // higher index as a key, the bitset will never need to grow.
+ // TODO(rasky): if memory is a problem, consider allocating
+ // a small bitset and lazily grow it when higher indices arrive.
+ bs = newBitset(int(i1))
+ po.noneq[i1] = bs
+ } else if bs.Test(i2) {
+ // Already recorded
+ return
+ }
+ bs.Set(i2)
+ po.upushneq(i1, i2)
+}
+
+// CheckIntegrity verifies internal integrity of a poset. It is intended
+// for debugging purposes.
+func (po *poset) CheckIntegrity() {
+ // Record which index is a constant
+ constants := newBitset(int(po.lastidx + 1))
+ for _, c := range po.constants {
+ constants.Set(c)
+ }
+
+ // Verify that each node appears in a single DAG, and that
+ // all constants are within the first DAG
+ seen := newBitset(int(po.lastidx + 1))
+ for ridx, r := range po.roots {
+ if r == 0 {
+ panic("empty root")
+ }
+
+ po.dfs(r, false, func(i uint32) bool {
+ if seen.Test(i) {
+ panic("duplicate node")
+ }
+ seen.Set(i)
+ if constants.Test(i) {
+ if ridx != 0 {
+ panic("constants not in the first DAG")
+ }
+ }
+ return false
+ })
+ }
+
+ // Verify that values contain the minimum set
+ for id, idx := range po.values {
+ if !seen.Test(idx) {
+ panic(fmt.Errorf("spurious value [%d]=%d", id, idx))
+ }
+ }
+
+ // Verify that only existing nodes have non-zero children
+ for i, n := range po.nodes {
+ if n.l|n.r != 0 {
+ if !seen.Test(uint32(i)) {
+ panic(fmt.Errorf("children of unknown node %d->%v", i, n))
+ }
+ if n.l.Target() == uint32(i) || n.r.Target() == uint32(i) {
+ panic(fmt.Errorf("self-loop on node %d", i))
+ }
+ }
+ }
+}
+
+// CheckEmpty checks that a poset is completely empty.
+// It can be used for debugging purposes, as a poset is supposed to
+// be empty after it's fully rolled back through Undo.
+func (po *poset) CheckEmpty() error {
+ if len(po.nodes) != 1 {
+ return fmt.Errorf("non-empty nodes list: %v", po.nodes)
+ }
+ if len(po.values) != 0 {
+ return fmt.Errorf("non-empty value map: %v", po.values)
+ }
+ if len(po.roots) != 0 {
+ return fmt.Errorf("non-empty root list: %v", po.roots)
+ }
+ if len(po.constants) != 0 {
+ return fmt.Errorf("non-empty constants: %v", po.constants)
+ }
+ if len(po.undo) != 0 {
+ return fmt.Errorf("non-empty undo list: %v", po.undo)
+ }
+ if po.lastidx != 0 {
+ return fmt.Errorf("lastidx index is not zero: %v", po.lastidx)
+ }
+ for _, bs := range po.noneq {
+ for _, x := range bs {
+ if x != 0 {
+ return fmt.Errorf("non-empty noneq map")
+ }
+ }
+ }
+ return nil
+}
+
+// DotDump dumps the poset in graphviz format to file fn, with the specified title.
+func (po *poset) DotDump(fn string, title string) error {
+ f, err := os.Create(fn)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Create reverse index mapping (taking aliases into account)
+ names := make(map[uint32]string)
+ for id, i := range po.values {
+ s := names[i]
+ if s == "" {
+ s = fmt.Sprintf("v%d", id)
+ } else {
+ s += fmt.Sprintf(", v%d", id)
+ }
+ names[i] = s
+ }
+
+ // Create reverse constant mapping
+ consts := make(map[uint32]int64)
+ for val, idx := range po.constants {
+ consts[idx] = val
+ }
+
+ fmt.Fprintf(f, "digraph poset {\n")
+ fmt.Fprintf(f, "\tedge [ fontsize=10 ]\n")
+ for ridx, r := range po.roots {
+ fmt.Fprintf(f, "\tsubgraph root%d {\n", ridx)
+ po.dfs(r, false, func(i uint32) bool {
+ if val, ok := consts[i]; ok {
+ // Constant
+ var vals string
+ if po.flags&posetFlagUnsigned != 0 {
+ vals = fmt.Sprint(uint64(val))
+ } else {
+ vals = fmt.Sprint(int64(val))
+ }
+ fmt.Fprintf(f, "\t\tnode%d [shape=box style=filled fillcolor=cadetblue1 label=<%s <font point-size=\"6\">%s [%d]</font>>]\n",
+ i, vals, names[i], i)
+ } else {
+ // Normal SSA value
+ fmt.Fprintf(f, "\t\tnode%d [label=<%s <font point-size=\"6\">[%d]</font>>]\n", i, names[i], i)
+ }
+ chl, chr := po.children(i)
+ for _, ch := range []posetEdge{chl, chr} {
+ if ch != 0 {
+ if ch.Strict() {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <\" color=\"red\"]\n", i, ch.Target())
+ } else {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <=\" color=\"green\"]\n", i, ch.Target())
+ }
+ }
+ }
+ return false
+ })
+ fmt.Fprintf(f, "\t}\n")
+ }
+ fmt.Fprintf(f, "\tlabelloc=\"t\"\n")
+ fmt.Fprintf(f, "\tlabeldistance=\"3.0\"\n")
+ fmt.Fprintf(f, "\tlabel=%q\n", title)
+ fmt.Fprintf(f, "}\n")
+ return nil
+}
+
+// Ordered reports whether n1<n2. It returns false either when it is
+// certain that n1<n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) Ordered(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 != i2 && po.reaches(i1, i2, true)
+}
+
+// OrderedOrEqual reports whether n1<=n2. It returns false either when it is
+// certain that n1<=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 == i2 || po.reaches(i1, i2, false)
+}
+
+// Equal reports whether n1==n2. It returns false either when it is
+// certain that n1==n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(1).
+func (po *poset) Equal(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Equal with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ return f1 && f2 && i1 == i2
+}
+
+// NonEqual reports whether n1!=n2. It returns false either when it is
+// certain that n1!=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n) (because it internally calls Ordered to see if we
+// can infer n1!=n2 from n1<n2 or n2<n1).
+func (po *poset) NonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call NonEqual with n1==n2")
+ }
+
+ // If we never saw the nodes before, we don't
+ // have a recorded non-equality.
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ // Check if we recorded inequality
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if n1<n2 or n2<n1, in which case we can infer that n1!=n2
+ if po.Ordered(n1, n2) || po.Ordered(n2, n1) {
+ return true
+ }
+
+ return false
+}
+
+// setOrder records that n1<n2 or n1<=n2 (depending on strict). Returns false
+// if this is a contradiction.
+// Implements SetOrder() and SetOrderOrEqual()
+func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ // Neither n1 nor n2 are in the poset, so they are not related
+ // in any way to existing nodes.
+ // Create a new DAG to record the relation.
+ i1, i2 = po.newnode(n1), po.newnode(n2)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.addchild(i1, i2, strict)
+
+ case f1 && !f2:
+ // n1 is in one of the DAGs, while n2 is not. Add n2 as a child
+ // of n1.
+ i2 = po.newnode(n2)
+ po.addchild(i1, i2, strict)
+
+ case !f1 && f2:
+ // n1 is not in any DAG but n2 is. If n2 is a root, we can put
+ // n1 in its place as a root; otherwise, we need to create a new
+ // dummy root to record the relation.
+ i1 = po.newnode(n1)
+
+ if po.isroot(i2) {
+ po.changeroot(i2, i1)
+ po.upush(undoChangeRoot, i1, newedge(i2, strict))
+ po.addchild(i1, i2, strict)
+ return true
+ }
+
+ // Search for i2's root; this requires an O(n) search on all
+ // DAGs
+ r := po.findroot(i2)
+
+ // Re-parent as follows:
+ //
+ //                 dummy
+ //     r           /   \
+ //      \   ===>  r     i1
+ //       i2        \   /
+ //                   i2
+ //
+ dummy := po.newnode(nil)
+ po.changeroot(r, dummy)
+ po.upush(undoChangeRoot, dummy, newedge(r, false))
+ po.addchild(dummy, r, false)
+ po.addchild(dummy, i1, false)
+ po.addchild(i1, i2, strict)
+
+ case f1 && f2:
+ // If the nodes are aliased, fail only if we're setting a strict order
+ // (that is, we cannot set n1<n2 if n1==n2).
+ if i1 == i2 {
+ return !strict
+ }
+
+ // If we are trying to record n1<=n2 but we learned that n1!=n2,
+ // record n1<n2, as it provides more information.
+ if !strict && po.isnoneq(i1, i2) {
+ strict = true
+ }
+
+ // Both n1 and n2 are in the poset. This is the complex part of the algorithm
+ // as we need to find many different cases and DAG shapes.
+
+ // Check if n1 somehow reaches n2
+ if po.reaches(i1, i2, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #1: N1<=X<=N2 | N1<=N2 | do nothing
+ // #2: N1<=X<=N2 | N1<N2 | add strict edge (N1<N2)
+ // #3: N1<X<N2 | N1<=N2 | do nothing (we already know more)
+ // #4: N1<X<N2 | N1<N2 | do nothing
+
+ // Check if we're in case #2
+ if strict && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ return true
+ }
+
+ // Case #1, #3 or #4: nothing to do
+ return true
+ }
+
+ // Check if n2 somehow reaches n1
+ if po.reaches(i2, i1, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #5: N2<=X<=N1 | N1<=N2 | collapse path (learn that N1=X=N2)
+ // #6: N2<=X<=N1 | N1<N2 | contradiction
+ // #7: N2<X<N1 | N1<=N2 | contradiction in the path
+ // #8: N2<X<N1 | N1<N2 | contradiction
+
+ if strict {
+ // Cases #6 and #8: contradiction
+ return false
+ }
+
+ // We're in case #5 or #7. Try to collapse path, and that will
+ // fail if it realizes that we are in case #7.
+ return po.collapsepath(n2, n1)
+ }
+
+ // We don't know of any existing relation between n1 and n2. They could
+ // be part of the same DAG or not.
+ // Find their roots to check whether they are in the same DAG.
+ r1, r2 := po.findroot(i1), po.findroot(i2)
+ if r1 != r2 {
+ // We need to merge the two DAGs to record a relation between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+ // Connect n1 and n2
+ po.addchild(i1, i2, strict)
+ }
+
+ return true
+}
+
+// SetOrder records that n1<n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrder(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, true)
+}
+
+// SetOrderOrEqual records that n1<=n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrderOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, false)
+}
+
+// SetEqual records that n1==n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1<n2 or n2<n1).
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Add with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.aliasnewnode(n1, n2)
+ case f1 && !f2:
+ po.aliasnewnode(n1, n2)
+ case !f1 && f2:
+ po.aliasnewnode(n2, n1)
+ case f1 && f2:
+ if i1 == i2 {
+ // Already aliased, ignore
+ return true
+ }
+
+ // If we recorded that n1!=n2, this is a contradiction.
+ if po.isnoneq(i1, i2) {
+ return false
+ }
+
+		// If we already knew that n1<=n2, we can collapse the path to
+		// record n1==n2 (and vice versa).
+ if po.reaches(i1, i2, false) {
+ return po.collapsepath(n1, n2)
+ }
+ if po.reaches(i2, i1, false) {
+ return po.collapsepath(n2, n1)
+ }
+
+ r1 := po.findroot(i1)
+ r2 := po.findroot(i2)
+ if r1 != r2 {
+ // Merge the two DAGs so we can record relations between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+		// Set n2 as alias of n1. This will also update all the references
+		// to n2 to become references to n1.
+ i2s := newBitset(int(po.lastidx) + 1)
+ i2s.Set(i2)
+ po.aliasnodes(n1, i2s)
+ }
+ return true
+}
+
+// SetNonEqual records that n1!=n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1==n2).
+// Complexity is O(n).
+func (po *poset) SetNonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetNonEqual with n1==n2")
+ }
+
+ // Check whether the nodes are already in the poset
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If either node wasn't present, we just record the new relation
+ // and exit.
+ if !f1 || !f2 {
+ po.setnoneq(n1, n2)
+ return true
+ }
+
+ // See if we already know this, in which case there's nothing to do.
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if we're contradicting an existing equality relation
+ if po.Equal(n1, n2) {
+ return false
+ }
+
+ // Record non-equality
+ po.setnoneq(n1, n2)
+
+ // If we know that i1<=i2 but not i1<i2, learn that as we
+ // now know that they are not equal. Do the same for i2<=i1.
+ // Do this check only if both nodes were already in the DAG,
+ // otherwise there cannot be an existing relation.
+ if po.reaches(i1, i2, false) && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ }
+ if po.reaches(i2, i1, false) && !po.reaches(i2, i1, true) {
+ po.addchild(i2, i1, true)
+ }
+
+ return true
+}
+
+// Checkpoint saves the current state of the DAG so that it's possible
+// to later undo this state.
+// Complexity is O(1).
+func (po *poset) Checkpoint() {
+ po.undo = append(po.undo, posetUndo{typ: undoCheckpoint})
+}
+
+// Undo restores the state of the poset to the previous checkpoint.
+// Complexity depends on the type of operations that were performed
+// since the last checkpoint; each Set* operation creates an undo
+// pass which Undo has to revert with a worst-case complexity of O(n).
+func (po *poset) Undo() {
+ if len(po.undo) == 0 {
+ panic("empty undo stack")
+ }
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+
+ for len(po.undo) > 0 {
+ pass := po.undo[len(po.undo)-1]
+ po.undo = po.undo[:len(po.undo)-1]
+
+ switch pass.typ {
+ case undoCheckpoint:
+ return
+
+ case undoSetChl:
+ po.setchl(pass.idx, pass.edge)
+
+ case undoSetChr:
+ po.setchr(pass.idx, pass.edge)
+
+ case undoNonEqual:
+ po.noneq[uint32(pass.ID)].Clear(pass.idx)
+
+ case undoNewNode:
+ if pass.idx != po.lastidx {
+ panic("invalid newnode index")
+ }
+ if pass.ID != 0 {
+ if po.values[pass.ID] != pass.idx {
+ panic("invalid newnode undo pass")
+ }
+ delete(po.values, pass.ID)
+ }
+ po.setchl(pass.idx, 0)
+ po.setchr(pass.idx, 0)
+ po.nodes = po.nodes[:pass.idx]
+ po.lastidx--
+
+ case undoNewConstant:
+ // FIXME: remove this O(n) loop
+ var val int64
+ var i uint32
+ for val, i = range po.constants {
+ if i == pass.idx {
+ break
+ }
+ }
+ if i != pass.idx {
+ panic("constant not found in undo pass")
+ }
+ if pass.ID == 0 {
+ delete(po.constants, val)
+ } else {
+ // Restore previous index as constant node
+ // (also restoring the invariant on correct bounds)
+ oldidx := uint32(pass.ID)
+ po.constants[val] = oldidx
+ }
+
+ case undoAliasNode:
+ ID, prev := pass.ID, pass.idx
+ cur := po.values[ID]
+ if prev == 0 {
+ // Born as an alias, die as an alias
+ delete(po.values, ID)
+ } else {
+ if cur == prev {
+ panic("invalid aliasnode undo pass")
+ }
+ // Give it back previous value
+ po.values[ID] = prev
+ }
+
+ case undoNewRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo newroot")
+ }
+ po.removeroot(i)
+
+ case undoChangeRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo changeroot")
+ }
+ po.changeroot(i, pass.edge.Target())
+
+ case undoMergeRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ po.changeroot(i, l.Target())
+ po.roots = append(po.roots, r.Target())
+
+ default:
+ panic(pass.typ)
+ }
+ }
+
+ if debugPoset && po.CheckEmpty() != nil {
+ panic("poset not empty at the end of undo")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/poset_test.go b/src/cmd/compile/internal/ssa/poset_test.go
new file mode 100644
index 0000000..a6db1d1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset_test.go
@@ -0,0 +1,800 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "testing"
+)
+
+const (
+ SetOrder = "SetOrder"
+ SetOrder_Fail = "SetOrder_Fail"
+ SetOrderOrEqual = "SetOrderOrEqual"
+ SetOrderOrEqual_Fail = "SetOrderOrEqual_Fail"
+ Ordered = "Ordered"
+ Ordered_Fail = "Ordered_Fail"
+ OrderedOrEqual = "OrderedOrEqual"
+ OrderedOrEqual_Fail = "OrderedOrEqual_Fail"
+ SetEqual = "SetEqual"
+ SetEqual_Fail = "SetEqual_Fail"
+ Equal = "Equal"
+ Equal_Fail = "Equal_Fail"
+ SetNonEqual = "SetNonEqual"
+ SetNonEqual_Fail = "SetNonEqual_Fail"
+ NonEqual = "NonEqual"
+ NonEqual_Fail = "NonEqual_Fail"
+ Checkpoint = "Checkpoint"
+ Undo = "Undo"
+)
+
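+// posetTestOp describes one step of a test: typ names the poset method to
+// invoke (the _Fail variants assert that the call returns false), and a, b
+// select the test values passed to it.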
+type posetTestOp struct {
+ typ string
+ a, b int
+}
+
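+// vconst and vconst2 (below) return the test indices of two distinct *Value
+// nodes that both carry the constant i (see the setup in testPosetOps below),
+// so the tests can exercise relations between different instances of the same
+// constant.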
+func vconst(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 128 + i
+}
+
+func vconst2(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 256 + i
+}
+
+func testPosetOps(t *testing.T, unsigned bool, ops []posetTestOp) {
+ var v [1512]*Value
+ for i := range v {
+ v[i] = new(Value)
+ v[i].ID = ID(i)
+ if i >= 1000 && i < 1256 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 128)
+ }
+ if i >= 1256 && i < 1512 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 256)
+ }
+ }
+
+ po := newPoset()
+ po.SetUnsigned(unsigned)
+ for idx, op := range ops {
+ t.Logf("op%d%v", idx, op)
+ switch op.typ {
+ case SetOrder:
+ if !po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrder_Fail:
+ if po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetOrderOrEqual:
+ if !po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrderOrEqual_Fail:
+ if po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Ordered:
+ if !po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Ordered_Fail:
+ if po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case OrderedOrEqual:
+ if !po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case OrderedOrEqual_Fail:
+ if po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetEqual:
+ if !po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetEqual_Fail:
+ if po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Equal:
+ if !po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Equal_Fail:
+ if po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetNonEqual:
+ if !po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetNonEqual_Fail:
+ if po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case NonEqual:
+ if !po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case NonEqual_Fail:
+ if po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Checkpoint:
+ po.Checkpoint()
+ case Undo:
+ t.Log("Undo stack", po.undo)
+ po.Undo()
+ default:
+ panic("unimplemented")
+ }
+
+ if false {
+ po.DotDump(fmt.Sprintf("op%d.dot", idx), fmt.Sprintf("Last op: %v", op))
+ }
+
+ po.CheckIntegrity()
+ }
+
+ // Check that the poset is completely empty
+ if err := po.CheckEmpty(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPoset(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Ordered_Fail, 123, 124},
+
+ // Dag #0: 100<101
+ {Checkpoint, 0, 0},
+ {SetOrder, 100, 101},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 101, 100},
+ {SetOrder_Fail, 101, 100},
+ {SetOrder, 100, 101}, // repeat
+ {NonEqual, 100, 101},
+ {NonEqual, 101, 100},
+ {SetEqual_Fail, 100, 101},
+
+ // Dag #1: 4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder, 7, 12},
+ {Ordered, 7, 12},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 12, 4},
+ {NonEqual, 4, 12},
+ {NonEqual, 12, 4},
+ {NonEqual_Fail, 4, 100},
+ {OrderedOrEqual, 4, 12},
+ {OrderedOrEqual_Fail, 12, 4},
+ {OrderedOrEqual, 4, 7},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Dag #1: 1<4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, 4},
+ {Ordered, 1, 4},
+ {Ordered, 1, 12},
+ {Ordered_Fail, 12, 1},
+
+ // Dag #1: 1<4<=7<12, 6<7
+ {Checkpoint, 0, 0},
+ {SetOrder, 6, 7},
+ {Ordered, 6, 7},
+ {Ordered, 6, 12},
+ {SetOrder_Fail, 7, 4},
+ {SetOrder_Fail, 7, 6},
+ {SetOrder_Fail, 7, 1},
+
+ // Dag #1: 1<4<=7<12, 1<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 1, 6},
+ {SetOrder, 1, 6},
+ {Ordered, 1, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Dag #1: 1<4<=7<12, 1<4<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 4, 6},
+ {Ordered_Fail, 4, 7},
+ {SetOrder, 4, 6},
+ {Ordered, 4, 6},
+ {OrderedOrEqual, 4, 6},
+ {Ordered, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder_Fail, 6, 4},
+ {Ordered_Fail, 7, 6},
+ {Ordered_Fail, 7, 4},
+ {OrderedOrEqual_Fail, 7, 6},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Merge: 1<4<6, 4<=7<12, 6<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 101},
+ {SetOrder, 6, 101},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Merge: 1<4<6, 4<=7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 100},
+ {SetOrder, 6, 100},
+ {Ordered, 1, 100},
+
+ // Undo: 1<4<6<7<12, 6<101
+ {Ordered, 100, 101},
+ {Undo, 0, 0},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo: 1<4<6<7<12, 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 100},
+ {Ordered_Fail, 1, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered_Fail, 6, 101},
+
+ // Merge: 1<4<6<7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered, 100, 101},
+ {SetOrder, 6, 100},
+ {Ordered, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo 2 times: 1<4<7<12, 1<6<7
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 6},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 4, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Undo 2 times: 1<4<7<12
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 12},
+ {Ordered, 7, 12},
+ {Ordered_Fail, 1, 6},
+ {Ordered_Fail, 6, 7},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 1, 101},
+
+ // Undo: 4<7<12
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 12},
+ {Ordered_Fail, 1, 4},
+ {Ordered, 4, 12},
+ {Ordered, 100, 101},
+
+ // Undo: 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 4, 7},
+ {Ordered_Fail, 7, 12},
+ {Ordered, 100, 101},
+
+		// Recreate DAG #1 from scratch, reusing the same nodes.
+ // This also stresses that Undo has done its job correctly.
+ // DAG: 1<2<(5|6), 101<102<(105|106<107)
+ {Checkpoint, 0, 0},
+ {SetOrder, 101, 102},
+ {SetOrder, 102, 105},
+ {SetOrder, 102, 106},
+ {SetOrder, 106, 107},
+ {SetOrder, 1, 2},
+ {SetOrder, 2, 5},
+ {SetOrder, 2, 6},
+ {SetEqual_Fail, 1, 6},
+ {SetEqual_Fail, 107, 102},
+
+ // Now Set 2 == 102
+ // New DAG: (1|101)<2==102<(5|6|105|106<107)
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 102},
+ {Equal, 2, 102},
+ {SetEqual, 2, 102}, // trivially pass
+ {SetNonEqual_Fail, 2, 102}, // trivially fail
+ {Ordered, 1, 107},
+ {Ordered, 101, 6},
+ {Ordered, 101, 105},
+ {Ordered, 2, 106},
+ {Ordered, 102, 6},
+
+ // Undo SetEqual
+ {Undo, 0, 0},
+ {Equal_Fail, 2, 102},
+ {Ordered_Fail, 2, 102},
+ {Ordered_Fail, 1, 107},
+ {Ordered_Fail, 101, 6},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 100},
+ {Ordered, 1, 107},
+ {Ordered, 100, 6},
+
+ // SetEqual with new node
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 400},
+ {SetEqual, 401, 2},
+ {Equal, 400, 401},
+ {Ordered, 1, 400},
+ {Ordered, 400, 6},
+ {Ordered, 1, 401},
+ {Ordered, 401, 6},
+ {Ordered_Fail, 2, 401},
+
+ // SetEqual unseen nodes and then connect
+ {Checkpoint, 0, 0},
+ {SetEqual, 500, 501},
+ {SetEqual, 102, 501},
+ {Equal, 500, 102},
+ {Ordered, 501, 106},
+ {Ordered, 100, 500},
+ {SetEqual, 500, 501},
+ {Ordered_Fail, 500, 501},
+ {Ordered_Fail, 102, 501},
+
+ // SetNonEqual relations
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 600, 601},
+ {NonEqual, 600, 601},
+ {SetNonEqual, 601, 602},
+ {NonEqual, 601, 602},
+ {NonEqual_Fail, 600, 602}, // non-transitive
+ {SetEqual_Fail, 601, 602},
+
+ // Undo back to beginning, leave the poset empty
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetStrict(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Build: 20!=30, 10<20<=30<40. The 20<=30 will become 20<30.
+ {SetNonEqual, 20, 30},
+ {SetOrder, 10, 20},
+ {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Undo, 0, 0},
+
+ // Now do the opposite: first build the DAG and then learn non-equality
+ {Checkpoint, 0, 0},
+ {SetOrder, 10, 20},
+		{SetOrderOrEqual, 20, 30}, // will be upgraded to 20<30 once 20!=30 is learned below
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered_Fail, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 20, 30},
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 35},
+ {OrderedOrEqual, 20, 35},
+ {Ordered_Fail, 20, 35},
+ {SetNonEqual, 20, 35},
+ {Ordered, 20, 35},
+ {Undo, 0, 0},
+
+ // Learn <= and >=
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 50, 60},
+ {SetOrderOrEqual, 60, 50},
+ {OrderedOrEqual, 50, 60},
+ {OrderedOrEqual, 60, 50},
+ {Ordered_Fail, 50, 60},
+ {Ordered_Fail, 60, 50},
+ {Equal, 50, 60},
+ {Equal, 60, 50},
+ {NonEqual_Fail, 50, 60},
+ {NonEqual_Fail, 60, 50},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetCollapse(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Create a complex graph of <= relations among nodes between 10 and 25.
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, vconst(20)},
+ {SetOrderOrEqual, vconst(20), 25},
+ {SetOrderOrEqual, 10, 12},
+ {SetOrderOrEqual, 12, 16},
+ {SetOrderOrEqual, 16, vconst(20)},
+ {SetOrderOrEqual, 10, 17},
+ {SetOrderOrEqual, 17, 25},
+ {SetOrderOrEqual, 15, 18},
+ {SetOrderOrEqual, 18, vconst(20)},
+ {SetOrderOrEqual, 15, 19},
+ {SetOrderOrEqual, 19, 25},
+
+ // These are other paths not part of the main collapsing path
+ {SetOrderOrEqual, 10, 11},
+ {SetOrderOrEqual, 11, 26},
+ {SetOrderOrEqual, 13, 25},
+ {SetOrderOrEqual, 100, 25},
+ {SetOrderOrEqual, 101, 15},
+ {SetOrderOrEqual, 102, 10},
+ {SetOrderOrEqual, 25, 103},
+ {SetOrderOrEqual, 20, 104},
+
+ {Checkpoint, 0, 0},
+ // Collapse everything by setting 10 >= 25: this should make everything equal
+ {SetOrderOrEqual, 25, 10},
+
+ // Check that all nodes are pairwise equal now
+ {Equal, 10, 12},
+ {Equal, 10, 15},
+ {Equal, 10, 16},
+ {Equal, 10, 17},
+ {Equal, 10, 18},
+ {Equal, 10, 19},
+ {Equal, 10, vconst(20)},
+ {Equal, 10, vconst2(20)},
+ {Equal, 10, 25},
+
+ {Equal, 12, 15},
+ {Equal, 12, 16},
+ {Equal, 12, 17},
+ {Equal, 12, 18},
+ {Equal, 12, 19},
+ {Equal, 12, vconst(20)},
+ {Equal, 12, vconst2(20)},
+ {Equal, 12, 25},
+
+ {Equal, 15, 16},
+ {Equal, 15, 17},
+ {Equal, 15, 18},
+ {Equal, 15, 19},
+ {Equal, 15, vconst(20)},
+ {Equal, 15, vconst2(20)},
+ {Equal, 15, 25},
+
+ {Equal, 16, 17},
+ {Equal, 16, 18},
+ {Equal, 16, 19},
+ {Equal, 16, vconst(20)},
+ {Equal, 16, vconst2(20)},
+ {Equal, 16, 25},
+
+ {Equal, 17, 18},
+ {Equal, 17, 19},
+ {Equal, 17, vconst(20)},
+ {Equal, 17, vconst2(20)},
+ {Equal, 17, 25},
+
+ {Equal, 18, 19},
+ {Equal, 18, vconst(20)},
+ {Equal, 18, vconst2(20)},
+ {Equal, 18, 25},
+
+ {Equal, 19, vconst(20)},
+ {Equal, 19, vconst2(20)},
+ {Equal, 19, 25},
+
+ {Equal, vconst(20), vconst2(20)},
+ {Equal, vconst(20), 25},
+
+ {Equal, vconst2(20), 25},
+
+ // ... but not 11/26/100/101/102, which were on a different path
+ {Equal_Fail, 10, 11},
+ {Equal_Fail, 10, 26},
+ {Equal_Fail, 10, 100},
+ {Equal_Fail, 10, 101},
+ {Equal_Fail, 10, 102},
+ {OrderedOrEqual, 10, 26},
+ {OrderedOrEqual, 25, 26},
+ {OrderedOrEqual, 13, 25},
+ {OrderedOrEqual, 13, 10},
+
+ {Undo, 0, 0},
+ {OrderedOrEqual, 10, 25},
+ {Equal_Fail, 10, 12},
+ {Equal_Fail, 10, 15},
+ {Equal_Fail, 10, 25},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, 25},
+ {SetOrder, 10, 16},
+ {SetOrderOrEqual, 16, 20},
+ // Check that we cannot collapse here because of the strict relation 10<16
+ {SetOrderOrEqual_Fail, 20, 10},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetSetEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ // 10<=20<=30<40, 20<=100<110
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 30},
+ {SetOrder, 30, 40},
+ {SetOrderOrEqual, 20, 100},
+ {SetOrder, 100, 110},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual_Fail, 30, 10},
+ {Ordered_Fail, 10, 30},
+ {Ordered_Fail, 30, 10},
+ {Ordered, 10, 40},
+ {Ordered_Fail, 40, 10},
+
+ // Try learning 10==20.
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 10},
+ {SetOrder_Fail, 10, 20},
+ {SetOrder_Fail, 20, 10},
+ {Undo, 0, 0},
+
+ // Try learning 20==10.
+ {Checkpoint, 0, 0},
+ {SetEqual, 20, 10},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {Undo, 0, 0},
+
+ // Try learning 10==40 or 30==40 or 10==110.
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 30, 40},
+ {SetEqual_Fail, 40, 30},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 40==110, and then 10==40 or 10=110
+ {Checkpoint, 0, 0},
+ {SetEqual, 40, 110},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 40<20 or 30<20 or 110<10
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, 40, 20},
+ {SetOrder_Fail, 30, 20},
+ {SetOrder_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 30<=20
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 20},
+ {Equal, 30, 20},
+ {OrderedOrEqual, 30, 100},
+ {Ordered, 30, 110},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetConst(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(120)},
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(120)},
+ {OrderedOrEqual, 1, vconst(120)},
+ {OrderedOrEqual, 100, vconst(120)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, 20, vconst(20)},
+ {SetEqual, 25, vconst(25)},
+ {Ordered, 1, 20},
+ {Ordered, 1, vconst(30)},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, vconst(-20), 5},
+ {SetEqual, vconst(-25), 1},
+ {Ordered, 1, 5},
+ {Ordered, vconst(-30), 1},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 1, vconst(4)},
+ {SetNonEqual, 1, vconst(6)},
+ {NonEqual, 1, vconst(4)},
+ {NonEqual_Fail, 1, vconst(5)},
+ {NonEqual, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(4)},
+ {Equal_Fail, 1, vconst(5)},
+ {Equal_Fail, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(7)},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, true, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(-5)}, // -5 is a very big number in unsigned
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(-5)},
+ {OrderedOrEqual, 1, vconst(-5)},
+ {OrderedOrEqual, 100, vconst(-5)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, vconst(3)},
+ {SetNonEqual, 1, vconst(0)},
+ {Ordered_Fail, 1, vconst(0)},
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ // Check relations of a constant with itself
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+
+		// Check relations between two different constants, also using
+		// different *Value instances of the same constant
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst(4)},
+ {OrderedOrEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder, vconst(3), vconst(4)},
+ {Ordered, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, vconst(3), vconst(4)},
+ {SetEqual_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {NonEqual, vconst(3), vconst(4)},
+ {NonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {Equal_Fail, vconst(3), vconst(4)},
+ {Equal_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, vconst(3), vconst(4)},
+ {SetNonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetNonEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Learn again 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+
+ // Undo. We still know 10!=20
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Undo again. Now we know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Learn again 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We still know 10==20
+ {Undo, 0, 0},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go
new file mode 100644
index 0000000..36f09c3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/print.go
@@ -0,0 +1,159 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "fmt"
+ "io"
+)
+
+func printFunc(f *Func) {
+ f.Logf("%s", f)
+}
+
+func hashFunc(f *Func) []byte {
+ h := sha256.New()
+ p := stringFuncPrinter{w: h}
+ fprintFunc(p, f)
+ return h.Sum(nil)
+}
+
+func (f *Func) String() string {
+ var buf bytes.Buffer
+ p := stringFuncPrinter{w: &buf}
+ fprintFunc(p, f)
+ return buf.String()
+}
+
+type funcPrinter interface {
+ header(f *Func)
+ startBlock(b *Block, reachable bool)
+ endBlock(b *Block)
+ value(v *Value, live bool)
+ startDepCycle()
+ endDepCycle()
+ named(n LocalSlot, vals []*Value)
+}
+
+type stringFuncPrinter struct {
+ w io.Writer
+}
+
+func (p stringFuncPrinter) header(f *Func) {
+ fmt.Fprint(p.w, f.Name)
+ fmt.Fprint(p.w, " ")
+ fmt.Fprintln(p.w, f.Type)
+}
+
+func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
+ fmt.Fprintf(p.w, " b%d:", b.ID)
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " <-")
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " b%d", pred.ID)
+ }
+ }
+ if !reachable {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ io.WriteString(p.w, "\n")
+}
+
+func (p stringFuncPrinter) endBlock(b *Block) {
+ fmt.Fprintln(p.w, " "+b.LongString())
+}
+
+func (p stringFuncPrinter) value(v *Value, live bool) {
+ fmt.Fprint(p.w, " ")
+ //fmt.Fprint(p.w, v.Block.Func.fe.Pos(v.Pos))
+ //fmt.Fprint(p.w, ": ")
+ fmt.Fprint(p.w, v.LongString())
+ if !live {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ fmt.Fprintln(p.w)
+}
+
+func (p stringFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "dependency cycle!")
+}
+
+func (p stringFuncPrinter) endDepCycle() {}
+
+func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "name %s: %v\n", n, vals)
+}
+
+func fprintFunc(p funcPrinter, f *Func) {
+ reachable, live := findlive(f)
+ defer f.retDeadcodeLive(live)
+ p.header(f)
+ printed := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ p.startBlock(b, reachable[b.ID])
+
+ if f.scheduled {
+ // Order of Values has been decided - print in that order.
+ for _, v := range b.Values {
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ }
+ p.endBlock(b)
+ continue
+ }
+
+ // print phis first since all value cycles contain a phi
+ n := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+
+ // print rest of values in dependency order
+ for n < len(b.Values) {
+ m := n
+ outer:
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ for _, w := range v.Args {
+ // w == nil shouldn't happen, but if it does,
+ // don't panic; we'll get a better diagnosis later.
+ if w != nil && w.Block == b && !printed[w.ID] {
+ continue outer
+ }
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
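+			// If no value was printed in this pass (m is unchanged),
+			// the remaining values form a dependency cycle.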
+ if m == n {
+ p.startDepCycle()
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+ p.endDepCycle()
+ }
+ }
+
+ p.endBlock(b)
+ }
+ for _, name := range f.Names {
+ p.named(name, f.NamedValues[name])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
new file mode 100644
index 0000000..8a2e7c0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -0,0 +1,1426 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "math"
+)
+
+type branch int
+
+const (
+ unknown branch = iota
+ positive
+ negative
+)
+
+// relation represents the set of possible relations between
+// pairs of variables (v, w). Without a priori knowledge the
+// mask is lt | eq | gt meaning v can be less than, equal to or
+// greater than w. When the execution path branches on the condition
+// `v op w` the set of relations is updated to exclude any
+// relation not possible due to `v op w` being true (or false).
+//
+// E.g.
+//
+// r := relation(...)
+//
+// if v < w {
+// newR := r & lt
+// }
+// if v >= w {
+// newR := r & (eq|gt)
+// }
+// if v != w {
+// newR := r & (lt|gt)
+// }
+type relation uint
+
+const (
+ lt relation = 1 << iota
+ eq
+ gt
+)
+
+var relationStrings = [...]string{
+ 0: "none", lt: "<", eq: "==", lt | eq: "<=",
+ gt: ">", gt | lt: "!=", gt | eq: ">=", gt | eq | lt: "any",
+}
+
+func (r relation) String() string {
+ if r < relation(len(relationStrings)) {
+ return relationStrings[r]
+ }
+ return fmt.Sprintf("relation(%d)", uint(r))
+}
+
+// domain represents the domain of a variable pair in which a set
+// of relations is known. For example, relations learned for unsigned
+// pairs cannot be transferred to signed pairs because the same bit
+// representation can mean something else.
+type domain uint
+
+const (
+ signed domain = 1 << iota
+ unsigned
+ pointer
+ boolean
+)
+
+var domainStrings = [...]string{
+ "signed", "unsigned", "pointer", "boolean",
+}
+
+func (d domain) String() string {
+ s := ""
+ for i, ds := range domainStrings {
+ if d&(1<<uint(i)) != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += ds
+ d &^= 1 << uint(i)
+ }
+ }
+ if d != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += fmt.Sprintf("0x%x", uint(d))
+ }
+ return s
+}
+
+type pair struct {
+ v, w *Value // a pair of values, ordered by ID.
+ // v can be nil, to mean the zero value.
+ // for booleans the zero value (v == nil) is false.
+ d domain
+}
+
+// fact is a pair plus a relation for that pair.
+type fact struct {
+ p pair
+ r relation
+}
+
+// a limit records known upper and lower bounds for a value.
+type limit struct {
+ min, max int64 // min <= value <= max, signed
+ umin, umax uint64 // umin <= value <= umax, unsigned
+}
+
+func (l limit) String() string {
+ return fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax)
+}
+
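+// intersect returns the tightest limit implied by both l and l2: the larger of
+// the two minimums and the smaller of the two maximums, in both the signed and
+// unsigned domains. For example, intersecting signed bounds [0,100] with
+// [10,200] yields [10,100].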
+func (l limit) intersect(l2 limit) limit {
+ if l.min < l2.min {
+ l.min = l2.min
+ }
+ if l.umin < l2.umin {
+ l.umin = l2.umin
+ }
+ if l.max > l2.max {
+ l.max = l2.max
+ }
+ if l.umax > l2.umax {
+ l.umax = l2.umax
+ }
+ return l
+}
+
+var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64}
+
+// a limitFact is a limit known for a particular value.
+type limitFact struct {
+ vid ID
+ limit limit
+}
+
+// factsTable keeps track of relations between pairs of values.
+//
+// The fact table logic is sound, but incomplete. Outside of a few
+// special cases, it performs no deduction or arithmetic. While there
+// are known decision procedures for this, the ad hoc approach taken
+// by the facts table is effective for real code while remaining very
+// efficient.
+type factsTable struct {
+ // unsat is true if facts contains a contradiction.
+ //
+ // Note that the factsTable logic is incomplete, so if unsat
+ // is false, the assertions in factsTable could be satisfiable
+ // *or* unsatisfiable.
+ unsat bool // true if facts contains a contradiction
+ unsatDepth int // number of unsat checkpoints
+
+	facts map[pair]relation // current known set of relations
+	stack []fact            // previous sets of relations
+
+	// orderS and orderU are partial order sets (posets) that record
+	// information about relations between SSA values in the signed and
+	// unsigned domains, respectively.
+ orderS *poset
+ orderU *poset
+
+ // known lower and upper bounds on individual values.
+ limits map[ID]limit
+ limitStack []limitFact // previous entries
+
+ // For each slice s, a map from s to a len(s)/cap(s) value (if any)
+ // TODO: check if there are cases that matter where we have
+ // more than one len(s) for a slice. We could keep a list if necessary.
+ lens map[ID]*Value
+ caps map[ID]*Value
+
+ // zero is a zero-valued constant
+ zero *Value
+}
+
+// checkpointFact is an invalid value used for checkpointing
+// and restoring factsTable.
+var checkpointFact = fact{}
+var checkpointBound = limitFact{}
+
+func newFactsTable(f *Func) *factsTable {
+ ft := &factsTable{}
+ ft.orderS = f.newPoset()
+ ft.orderU = f.newPoset()
+ ft.orderS.SetUnsigned(false)
+ ft.orderU.SetUnsigned(true)
+ ft.facts = make(map[pair]relation)
+ ft.stack = make([]fact, 4)
+ ft.limits = make(map[ID]limit)
+ ft.limitStack = make([]limitFact, 4)
+ ft.zero = f.ConstInt64(f.Config.Types.Int64, 0)
+ return ft
+}
+
+// update updates the set of relations between v and w in domain d
+// restricting it to r.
+func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r)
+ }
+ // No need to do anything else if we already found unsat.
+ if ft.unsat {
+ return
+ }
+
+ // Self-fact. It's wasteful to register it into the facts
+ // table, so just note whether it's satisfiable
+ if v == w {
+ if r&eq == 0 {
+ ft.unsat = true
+ }
+ return
+ }
+
+ if d == signed || d == unsigned {
+ var ok bool
+ order := ft.orderS
+ if d == unsigned {
+ order = ft.orderU
+ }
+ switch r {
+ case lt:
+ ok = order.SetOrder(v, w)
+ case gt:
+ ok = order.SetOrder(w, v)
+ case lt | eq:
+ ok = order.SetOrderOrEqual(v, w)
+ case gt | eq:
+ ok = order.SetOrderOrEqual(w, v)
+ case eq:
+ ok = order.SetEqual(v, w)
+ case lt | gt:
+ ok = order.SetNonEqual(v, w)
+ default:
+ panic("unknown relation")
+ }
+ if !ok {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ } else {
+ if lessByID(w, v) {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+
+ p := pair{v, w, d}
+ oldR, ok := ft.facts[p]
+ if !ok {
+ if v == w {
+ oldR = eq
+ } else {
+ oldR = lt | eq | gt
+ }
+ }
+ // No changes compared to information already in facts table.
+ if oldR == r {
+ return
+ }
+ ft.stack = append(ft.stack, fact{p, oldR})
+ ft.facts[p] = oldR & r
+ // If this relation is not satisfiable, mark it and exit right away
+ if oldR&r == 0 {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Extract bounds when comparing against constants
+ if v.isGenericIntConst() {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ if v != nil && w.isGenericIntConst() {
+		// Note: all the +1/-1 below could overflow/underflow. Either way we
+		// still generate correct results; it just leads to imprecision.
+ // In fact if there is overflow/underflow, the corresponding
+ // code is unreachable because the known range is outside the range
+ // of the value's type.
+ old, ok := ft.limits[v.ID]
+ if !ok {
+ old = noLimit
+ if v.isGenericIntConst() {
+ switch d {
+ case signed:
+ old.min, old.max = v.AuxInt, v.AuxInt
+ if v.AuxInt >= 0 {
+ old.umin, old.umax = uint64(v.AuxInt), uint64(v.AuxInt)
+ }
+ case unsigned:
+ old.umin = v.AuxUnsigned()
+ old.umax = old.umin
+ if int64(old.umin) >= 0 {
+ old.min, old.max = int64(old.umin), int64(old.umin)
+ }
+ }
+ }
+ }
+ lim := noLimit
+ switch d {
+ case signed:
+ c := w.AuxInt
+ switch r {
+ case lt:
+ lim.max = c - 1
+ case lt | eq:
+ lim.max = c
+ case gt | eq:
+ lim.min = c
+ case gt:
+ lim.min = c + 1
+ case lt | gt:
+ lim = old
+ if c == lim.min {
+ lim.min++
+ }
+ if c == lim.max {
+ lim.max--
+ }
+ case eq:
+ lim.min = c
+ lim.max = c
+ }
+ if lim.min >= 0 {
+ // int(x) >= 0 && int(x) >= N ⇒ uint(x) >= N
+ lim.umin = uint64(lim.min)
+ }
+ if lim.max != noLimit.max && old.min >= 0 && lim.max >= 0 {
+ // 0 <= int(x) <= N ⇒ 0 <= uint(x) <= N
+ // This is for a max update, so the lower bound
+ // comes from what we already know (old).
+ lim.umax = uint64(lim.max)
+ }
+ case unsigned:
+ uc := w.AuxUnsigned()
+ switch r {
+ case lt:
+ lim.umax = uc - 1
+ case lt | eq:
+ lim.umax = uc
+ case gt | eq:
+ lim.umin = uc
+ case gt:
+ lim.umin = uc + 1
+ case lt | gt:
+ lim = old
+ if uc == lim.umin {
+ lim.umin++
+ }
+ if uc == lim.umax {
+ lim.umax--
+ }
+ case eq:
+ lim.umin = uc
+ lim.umax = uc
+ }
+ // We could use the contrapositives of the
+ // signed implications to derive signed facts,
+ // but it turns out not to matter.
+ }
+ ft.limitStack = append(ft.limitStack, limitFact{v.ID, old})
+ lim = old.intersect(lim)
+ ft.limits[v.ID] = lim
+ if v.Block.Func.pass.debug > 2 {
+ v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s %s", parent, v, w, r, lim.String())
+ }
+ if lim.min > lim.max || lim.umin > lim.umax {
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Derived facts below here are only about numbers.
+ if d != signed && d != unsigned {
+ return
+ }
+
+ // Additional facts we know given the relationship between len and cap.
+ //
+ // TODO: Since prove now derives transitive relations, it
+ // should be sufficient to learn that len(w) <= cap(w) at the
+ // beginning of prove where we look for all len/cap ops.
+ if v.Op == OpSliceLen && r&lt == 0 && ft.caps[v.Args[0].ID] != nil {
+ // len(s) > w implies cap(s) > w
+ // len(s) >= w implies cap(s) >= w
+ // len(s) == w implies cap(s) >= w
+ ft.update(parent, ft.caps[v.Args[0].ID], w, d, r|gt)
+ }
+ if w.Op == OpSliceLen && r&gt == 0 && ft.caps[w.Args[0].ID] != nil {
+ // same, length on the RHS.
+ ft.update(parent, v, ft.caps[w.Args[0].ID], d, r|lt)
+ }
+ if v.Op == OpSliceCap && r&gt == 0 && ft.lens[v.Args[0].ID] != nil {
+ // cap(s) < w implies len(s) < w
+ // cap(s) <= w implies len(s) <= w
+ // cap(s) == w implies len(s) <= w
+ ft.update(parent, ft.lens[v.Args[0].ID], w, d, r|lt)
+ }
+ if w.Op == OpSliceCap && r&lt == 0 && ft.lens[w.Args[0].ID] != nil {
+ // same, capacity on the RHS.
+ ft.update(parent, v, ft.lens[w.Args[0].ID], d, r|gt)
+ }
+
+ // Process fence-post implications.
+ //
+ // First, make the condition > or >=.
+ if r == lt || r == lt|eq {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ switch r {
+ case gt:
+ if x, delta := isConstDelta(v); x != nil && delta == 1 {
+ // x+1 > w ⇒ x >= w
+ //
+ // This is useful for eliminating the
+ // growslice branch of append.
+ ft.update(parent, x, w, d, gt|eq)
+ } else if x, delta := isConstDelta(w); x != nil && delta == -1 {
+ // v > x-1 ⇒ v >= x
+ ft.update(parent, v, x, d, gt|eq)
+ }
+ case gt | eq:
+ if x, delta := isConstDelta(v); x != nil && delta == -1 {
+ // x-1 >= w && x > min ⇒ x > w
+ //
+ // Useful for i > 0; s[i-1].
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.min > opMin[v.Op]) || (d == unsigned && lim.umin > 0)) {
+ ft.update(parent, x, w, d, gt)
+ }
+ } else if x, delta := isConstDelta(w); x != nil && delta == 1 {
+ // v >= x+1 && x < max ⇒ v > x
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.max < opMax[w.Op]) || (d == unsigned && lim.umax < opUMax[w.Op])) {
+ ft.update(parent, v, x, d, gt)
+ }
+ }
+ }
+
+ // Process: x+delta > w (with delta constant)
+ // Only signed domain for now (useful for accesses to slices in loops).
+ if r == gt || r == gt|eq {
+ if x, delta := isConstDelta(v); x != nil && d == signed {
+ if parent.Func.pass.debug > 1 {
+ parent.Func.Warnl(parent.Pos, "x+d %s w; x:%v %v delta:%v w:%v d:%v", r, x, parent.String(), delta, w.AuxInt, d)
+ }
+ if !w.isGenericIntConst() {
+ // If we know that x+delta > w but w is not constant, we can derive:
+ // if delta < 0 and x > MinInt - delta, then x > w (because x+delta cannot underflow)
+ // This is useful for loops with bounds "len(slice)-K" (delta = -K)
+ if l, has := ft.limits[x.ID]; has && delta < 0 {
+ if (x.Type.Size() == 8 && l.min >= math.MinInt64-delta) ||
+ (x.Type.Size() == 4 && l.min >= math.MinInt32-delta) {
+ ft.update(parent, x, w, signed, r)
+ }
+ }
+ } else {
+ // With w,delta constants, we want to derive: x+delta > w ⇒ x > w-delta
+ //
+ // We compute (using integers of the correct size):
+ // min = w - delta
+ // max = MaxInt - delta
+ //
+ // And we prove that:
+ // if min<max: min < x AND x <= max
+ // if min>max: min < x OR x <= max
+ //
+ // This is always correct, even in case of overflow.
+ //
+ // If the initial fact is x+delta >= w instead, the derived conditions are:
+ // if min<max: min <= x AND x <= max
+ // if min>max: min <= x OR x <= max
+ //
+ // Notice the conditions for max are still <=, as they handle overflows.
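+				//
+				// As a concrete (non-overflowing) example: if we learned
+				// x+5 > 100 for 64-bit values, then min=95 and max=MaxInt64-5;
+				// min<max, so we record x>95 and x<=MaxInt64-5 (if x were any
+				// larger, x+5 would wrap around and could not exceed 100).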
+ var min, max int64
+ var vmin, vmax *Value
+ switch x.Type.Size() {
+ case 8:
+ min = w.AuxInt - delta
+ max = int64(^uint64(0)>>1) - delta
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, max)
+
+ case 4:
+ min = int64(int32(w.AuxInt) - int32(delta))
+ max = int64(int32(^uint32(0)>>1) - int32(delta))
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, max)
+
+ default:
+ panic("unimplemented")
+ }
+
+ if min < max {
+ // Record that x > min and max >= x
+ ft.update(parent, x, vmin, d, r)
+ ft.update(parent, vmax, x, d, r|eq)
+ } else {
+ // We know that either x>min OR x<=max. factsTable cannot record OR conditions,
+ // so let's see if we can already prove that one of them is false, in which case
+ // the other must be true
+ if l, has := ft.limits[x.ID]; has {
+ if l.max <= min {
+ if r&eq == 0 || l.max < min {
+ // x>min (x>=min) is impossible, so it must be x<=max
+ ft.update(parent, vmax, x, d, r|eq)
+ }
+ } else if l.min > max {
+ // x<=max is impossible, so it must be x>min
+ ft.update(parent, x, vmin, d, r)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Look through value-preserving extensions.
+ // If the domain is appropriate for the pre-extension Type,
+ // repeat the update with the pre-extension Value.
+ if isCleanExt(v) {
+ switch {
+ case d == signed && v.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !v.Args[0].Type.IsSigned():
+ ft.update(parent, v.Args[0], w, d, r)
+ }
+ }
+ if isCleanExt(w) {
+ switch {
+ case d == signed && w.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !w.Args[0].Type.IsSigned():
+ ft.update(parent, v, w.Args[0], d, r)
+ }
+ }
+}
+
+var opMin = map[Op]int64{
+ OpAdd64: math.MinInt64, OpSub64: math.MinInt64,
+ OpAdd32: math.MinInt32, OpSub32: math.MinInt32,
+}
+
+var opMax = map[Op]int64{
+ OpAdd64: math.MaxInt64, OpSub64: math.MaxInt64,
+ OpAdd32: math.MaxInt32, OpSub32: math.MaxInt32,
+}
+
+var opUMax = map[Op]uint64{
+ OpAdd64: math.MaxUint64, OpSub64: math.MaxUint64,
+ OpAdd32: math.MaxUint32, OpSub32: math.MaxUint32,
+}
+
+// isNonNegative reports whether v is known to be non-negative.
+func (ft *factsTable) isNonNegative(v *Value) bool {
+ if isNonNegative(v) {
+ return true
+ }
+
+ var max int64
+ switch v.Type.Size() {
+ case 1:
+ max = math.MaxInt8
+ case 2:
+ max = math.MaxInt16
+ case 4:
+ max = math.MaxInt32
+ case 8:
+ max = math.MaxInt64
+ default:
+ panic("unexpected integer size")
+ }
+
+	// Check if the recorded limits can prove that the value is non-negative.
+ if l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) {
+ return true
+ }
+
+	// Check if v = x+delta, and we can use x's limits to prove that it's non-negative
+ if x, delta := isConstDelta(v); x != nil {
+ if l, has := ft.limits[x.ID]; has {
+ if delta > 0 && l.min >= -delta && l.max <= max-delta {
+ return true
+ }
+ if delta < 0 && l.min >= -delta {
+ return true
+ }
+ }
+ }
+
+ // Check if v is a value-preserving extension of a non-negative value.
+ if isCleanExt(v) && ft.isNonNegative(v.Args[0]) {
+ return true
+ }
+
+ // Check if the signed poset can prove that the value is >= 0
+ return ft.orderS.OrderedOrEqual(ft.zero, v)
+}
+
+// checkpoint saves the current state of known relations.
+// Called when descending on a branch.
+func (ft *factsTable) checkpoint() {
+ if ft.unsat {
+ ft.unsatDepth++
+ }
+ ft.stack = append(ft.stack, checkpointFact)
+ ft.limitStack = append(ft.limitStack, checkpointBound)
+ ft.orderS.Checkpoint()
+ ft.orderU.Checkpoint()
+}
+
+// restore restores known relation to the state just
+// before the previous checkpoint.
+// Called when backing up on a branch.
+func (ft *factsTable) restore() {
+ if ft.unsatDepth > 0 {
+ ft.unsatDepth--
+ } else {
+ ft.unsat = false
+ }
+ for {
+ old := ft.stack[len(ft.stack)-1]
+ ft.stack = ft.stack[:len(ft.stack)-1]
+ if old == checkpointFact {
+ break
+ }
+ if old.r == lt|eq|gt {
+ delete(ft.facts, old.p)
+ } else {
+ ft.facts[old.p] = old.r
+ }
+ }
+ for {
+ old := ft.limitStack[len(ft.limitStack)-1]
+ ft.limitStack = ft.limitStack[:len(ft.limitStack)-1]
+ if old.vid == 0 { // checkpointBound
+ break
+ }
+ if old.limit == noLimit {
+ delete(ft.limits, old.vid)
+ } else {
+ ft.limits[old.vid] = old.limit
+ }
+ }
+ ft.orderS.Undo()
+ ft.orderU.Undo()
+}
+
+func lessByID(v, w *Value) bool {
+ if v == nil && w == nil {
+ // Should not happen, but just in case.
+ return false
+ }
+ if v == nil {
+ return true
+ }
+ return w != nil && v.ID < w.ID
+}
+
+var (
+ reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7}
+
+	// domainRelationTable maps what we learn when the positive branch is taken.
+	// For example:
+	//      OpLess8:   {signed, lt},
+	//      v1 = (OpLess8 v2 v3).
+	// If the v1 branch is taken then we learn that the relation between
+	// v2 and v3 (in the signed domain) can be at most lt.
+ domainRelationTable = map[Op]struct {
+ d domain
+ r relation
+ }{
+ OpEq8: {signed | unsigned, eq},
+ OpEq16: {signed | unsigned, eq},
+ OpEq32: {signed | unsigned, eq},
+ OpEq64: {signed | unsigned, eq},
+ OpEqPtr: {pointer, eq},
+
+ OpNeq8: {signed | unsigned, lt | gt},
+ OpNeq16: {signed | unsigned, lt | gt},
+ OpNeq32: {signed | unsigned, lt | gt},
+ OpNeq64: {signed | unsigned, lt | gt},
+ OpNeqPtr: {pointer, lt | gt},
+
+ OpLess8: {signed, lt},
+ OpLess8U: {unsigned, lt},
+ OpLess16: {signed, lt},
+ OpLess16U: {unsigned, lt},
+ OpLess32: {signed, lt},
+ OpLess32U: {unsigned, lt},
+ OpLess64: {signed, lt},
+ OpLess64U: {unsigned, lt},
+
+ OpLeq8: {signed, lt | eq},
+ OpLeq8U: {unsigned, lt | eq},
+ OpLeq16: {signed, lt | eq},
+ OpLeq16U: {unsigned, lt | eq},
+ OpLeq32: {signed, lt | eq},
+ OpLeq32U: {unsigned, lt | eq},
+ OpLeq64: {signed, lt | eq},
+ OpLeq64U: {unsigned, lt | eq},
+
+ // For these ops, the negative branch is different: we can only
+ // prove signed/GE (signed/GT) if we can prove that arg0 is non-negative.
+ // See the special case in addBranchRestrictions.
+ OpIsInBounds: {signed | unsigned, lt}, // 0 <= arg0 < arg1
+ OpIsSliceInBounds: {signed | unsigned, lt | eq}, // 0 <= arg0 <= arg1
+ }
+)
+
+// prove removes redundant BlockIf branches that can be inferred
+// from previous dominating comparisons.
+//
+// By far, the most common redundant pairs are generated by bounds checking.
+// For example for the code:
+//
+// a[i] = 4
+// foo(a[i])
+//
+// The compiler will generate the following code:
+//
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// a[i] = 4
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// foo(a[i])
+//
+// The second comparison i >= len(a) is clearly redundant because if the
+// else branch of the first comparison is executed, we already know that i < len(a).
+// The code for the second panic can be removed.
+//
+// prove works by finding contradictions and trimming branches whose
+// conditions are unsatisfiable given the branches leading up to them.
+// It tracks a "fact table" of branch conditions. For each branching
+// block, it asserts the branch conditions that uniquely dominate that
+// block, and then separately asserts the block's branch condition and
+// its negation. If either leads to a contradiction, it can trim that
+// successor.
+func prove(f *Func) {
+ ft := newFactsTable(f)
+ ft.checkpoint()
+
+ var lensVars map[*Block][]*Value
+
+ // Find length and capacity ops.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ // We don't care about dead values.
+ // (There can be some that are CSEd but not removed yet.)
+ continue
+ }
+ switch v.Op {
+ case OpStringLen:
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ case OpSliceLen:
+ if ft.lens == nil {
+ ft.lens = map[ID]*Value{}
+ }
+ ft.lens[v.Args[0].ID] = v
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ case OpSliceCap:
+ if ft.caps == nil {
+ ft.caps = map[ID]*Value{}
+ }
+ ft.caps[v.Args[0].ID] = v
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ }
+ }
+ }
+
+ // Find induction variables. Currently, findIndVars
+ // is limited to one induction variable per block.
+ var indVars map[*Block]indVar
+ for _, v := range findIndVar(f) {
+ if indVars == nil {
+ indVars = make(map[*Block]indVar)
+ }
+ indVars[v.entry] = v
+ }
+
+ // current node state
+ type walkState int
+ const (
+ descend walkState = iota
+ simplify
+ )
+ // work maintains the DFS stack.
+ type bp struct {
+ block *Block // current handled block
+ state walkState // what's to do
+ }
+ work := make([]bp, 0, 256)
+ work = append(work, bp{
+ block: f.Entry,
+ state: descend,
+ })
+
+ idom := f.Idom()
+ sdom := f.Sdom()
+
+ // DFS on the dominator tree.
+ //
+ // For efficiency, we consider only the dominator tree rather
+ // than the entire flow graph. On the way down, we consider
+ // incoming branches and accumulate conditions that uniquely
+ // dominate the current block. If we discover a contradiction,
+ // we can eliminate the entire block and all of its children.
+ // On the way back up, we consider outgoing branches that
+ // haven't already been considered. This way we consider each
+ // branch condition only once.
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+ parent := idom[node.block.ID]
+ branch := getBranch(sdom, parent, node.block)
+
+ switch node.state {
+ case descend:
+ ft.checkpoint()
+
+			// Entering the block, add the block-specific facts that we collected
+			// at the beginning: induction variables and lens/caps of slices.
+ if iv, ok := indVars[node.block]; ok {
+ addIndVarRestrictions(ft, parent, iv)
+ }
+ if lens, ok := lensVars[node.block]; ok {
+ for _, v := range lens {
+ switch v.Op {
+ case OpSliceLen:
+ ft.update(node.block, v, v.Args[0].Args[1], signed, eq)
+ case OpSliceCap:
+ ft.update(node.block, v, v.Args[0].Args[2], signed, eq)
+ }
+ }
+ }
+
+ if branch != unknown {
+ addBranchRestrictions(ft, parent, branch)
+ if ft.unsat {
+ // node.block is unreachable.
+ // Remove it and don't visit
+ // its children.
+ removeBranch(parent, branch)
+ ft.restore()
+ break
+ }
+ // Otherwise, we can now commit to
+ // taking this branch. We'll restore
+ // ft when we unwind.
+ }
+
+ // Add inductive facts for phis in this block.
+ addLocalInductiveFacts(ft, node.block)
+
+ work = append(work, bp{
+ block: node.block,
+ state: simplify,
+ })
+ for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) {
+ work = append(work, bp{
+ block: s,
+ state: descend,
+ })
+ }
+
+ case simplify:
+ simplifyBlock(sdom, ft, node.block)
+ ft.restore()
+ }
+ }
+
+ ft.restore()
+
+ // Return the posets to the free list
+ for _, po := range []*poset{ft.orderS, ft.orderU} {
+ // Make sure it's empty as it should be. A non-empty poset
+ // might cause errors and miscompilations if reused.
+ if checkEnabled {
+ if err := po.CheckEmpty(); err != nil {
+ f.Fatalf("prove poset not empty after function %s: %v", f.Name, err)
+ }
+ }
+ f.retPoset(po)
+ }
+}
+
+// getBranch reports whether block b is reached through the positive or the
+// negative branch of its immediate dominator p, or unknown if it cannot tell.
+func getBranch(sdom SparseTree, p *Block, b *Block) branch {
+ if p == nil || p.Kind != BlockIf {
+ return unknown
+ }
+ // If p and p.Succs[0] are dominators it means that every path
+ // from entry to b passes through p and p.Succs[0]. We care that
+ // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
+ // has one predecessor then (apart from the degenerate case),
+ // there is no path from entry that can reach b through p.Succs[1].
+ // TODO: how about p->yes->b->yes, i.e. a loop in yes.
+ if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
+ return positive
+ }
+ if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
+ return negative
+ }
+ return unknown
+}
+
+// addIndVarRestrictions updates the factsTable ft with the facts
+// learned from the induction variable indVar which drives the loop
+// starting in Block b.
+func addIndVarRestrictions(ft *factsTable, b *Block, iv indVar) {
+ d := signed
+ if ft.isNonNegative(iv.min) && ft.isNonNegative(iv.max) {
+ d |= unsigned
+ }
+
+ if iv.flags&indVarMinExc == 0 {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt|eq)
+ } else {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt)
+ }
+
+ if iv.flags&indVarMaxInc == 0 {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt)
+ } else {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt|eq)
+ }
+}
+
+// addBranchRestrictions updates the factsTable ft with the facts learned when
+// branching from Block b in direction br.
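+// For example, for a comparison i < len(a) (OpLess64), the positive branch
+// records i<len(a) (lt) in the signed domain (and also in the unsigned domain
+// when both operands are known to be non-negative), while the negative branch
+// records i>=len(a) (eq|gt).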
+func addBranchRestrictions(ft *factsTable, b *Block, br branch) {
+ c := b.Controls[0]
+ switch br {
+ case negative:
+ addRestrictions(b, ft, boolean, nil, c, eq)
+ case positive:
+ addRestrictions(b, ft, boolean, nil, c, lt|gt)
+ default:
+ panic("unknown branch")
+ }
+ if tr, has := domainRelationTable[c.Op]; has {
+ // When we branched from parent we learned a new set of
+ // restrictions. Update the factsTable accordingly.
+ d := tr.d
+ if d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) {
+ d |= unsigned
+ }
+ switch c.Op {
+ case OpIsInBounds, OpIsSliceInBounds:
+ // 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ //
+ // On the positive branch, we learn:
+ // signed: 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ // unsigned: a0 < a1 (or a0 <= a1)
+ //
+ // On the negative branch, we learn (0 > a0 ||
+ // a0 >= a1). In the unsigned domain, this is
+ // simply a0 >= a1 (which is the reverse of the
+ // positive branch, so nothing surprising).
+ // But in the signed domain, we can't express the ||
+ // condition, so check if a0 is non-negative instead,
+ // to be able to learn something.
+ switch br {
+ case negative:
+ d = unsigned
+ if ft.isNonNegative(c.Args[0]) {
+ d |= signed
+ }
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq)
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ default:
+ switch br {
+ case negative:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ }
+
+ }
+}
+
+// addRestrictions updates restrictions from the immediate
+// dominating block (p) using r.
+func addRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r relation) {
+ if t == 0 {
+ // Trivial case: nothing to do.
+		// Should not happen, but just in case.
+ return
+ }
+ for i := domain(1); i <= t; i <<= 1 {
+ if t&i == 0 {
+ continue
+ }
+ ft.update(parent, v, w, i, r)
+ }
+}
+
+// addLocalInductiveFacts adds inductive facts when visiting b, where
+// b is a join point in a loop. In contrast with findIndVar, this
+// depends on facts established for b, which is why it happens when
+// visiting b. addLocalInductiveFacts specifically targets the pattern
+// created by OFORUNTIL, which isn't detected by findIndVar.
+//
+// TODO: It would be nice to combine this with findIndVar.
+func addLocalInductiveFacts(ft *factsTable, b *Block) {
+ // This looks for a specific pattern of induction:
+ //
+ // 1. i1 = OpPhi(min, i2) in b
+ // 2. i2 = i1 + 1
+ // 3. i2 < max at exit from b.Preds[1]
+ // 4. min < max
+ //
+ // If all of these conditions are true, then i1 < max and i1 >= min.
+
+	// Make sure b looks like a loop header: it must have exactly two predecessors.
+ if len(b.Preds) != 2 {
+ return
+ }
+
+ for _, i1 := range b.Values {
+ if i1.Op != OpPhi {
+ continue
+ }
+
+ // Check for conditions 1 and 2. This is easy to do
+ // and will throw out most phis.
+ min, i2 := i1.Args[0], i1.Args[1]
+ if i1q, delta := isConstDelta(i2); i1q != i1 || delta != 1 {
+ continue
+ }
+
+ // Try to prove condition 3. We can't just query the
+ // fact table for this because we don't know what the
+ // facts of b.Preds[1] are (in general, b.Preds[1] is
+ // a loop-back edge, so we haven't even been there
+ // yet). As a conservative approximation, we look for
+ // this condition in the predecessor chain until we
+ // hit a join point.
+ uniquePred := func(b *Block) *Block {
+ if len(b.Preds) == 1 {
+ return b.Preds[0].b
+ }
+ return nil
+ }
+ pred, child := b.Preds[1].b, b
+ for ; pred != nil; pred, child = uniquePred(pred), pred {
+ if pred.Kind != BlockIf {
+ continue
+ }
+ control := pred.Controls[0]
+
+ br := unknown
+ if pred.Succs[0].b == child {
+ br = positive
+ }
+ if pred.Succs[1].b == child {
+ if br != unknown {
+ continue
+ }
+ br = negative
+ }
+ if br == unknown {
+ continue
+ }
+
+ tr, has := domainRelationTable[control.Op]
+ if !has {
+ continue
+ }
+ r := tr.r
+ if br == negative {
+ // Negative branch taken to reach b.
+ // Complement the relations.
+ r = (lt | eq | gt) ^ r
+ }
+
+ // Check for i2 < max or max > i2.
+ var max *Value
+ if r == lt && control.Args[0] == i2 {
+ max = control.Args[1]
+ } else if r == gt && control.Args[1] == i2 {
+ max = control.Args[0]
+ } else {
+ continue
+ }
+
+ // Check condition 4 now that we have a
+ // candidate max. For this we can query the
+ // fact table. We "prove" min < max by showing
+ // that min >= max is unsat. (This may simply
+ // compare two constants; that's fine.)
+ ft.checkpoint()
+ ft.update(b, min, max, tr.d, gt|eq)
+ proved := ft.unsat
+ ft.restore()
+
+ if proved {
+ // We know that min <= i1 < max.
+ if b.Func.pass.debug > 0 {
+ printIndVar(b, i1, min, max, 1, 0)
+ }
+ ft.update(b, min, i1, tr.d, lt|eq)
+ ft.update(b, i1, max, tr.d, lt)
+ }
+ }
+ }
+}
+
+var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero}
+var mostNegativeDividend = map[Op]int64{
+ OpDiv16: -1 << 15,
+ OpMod16: -1 << 15,
+ OpDiv32: -1 << 31,
+ OpMod32: -1 << 31,
+ OpDiv64: -1 << 63,
+ OpMod64: -1 << 63}
+
+// simplifyBlock simplifies some constant values in b and evaluates
+// branches to non-uniquely dominated successors of b.
+func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpSlicemask:
+ // Replace OpSlicemask operations in b with constants where possible.
+ x, delta := isConstDelta(v.Args[0])
+ if x == nil {
+ continue
+ }
+ // slicemask(x + y)
+ // if x is larger than -y (y is negative), then slicemask is -1.
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ continue
+ }
+ if lim.umin > uint64(-delta) {
+ if v.Args[0].Op == OpAdd64 {
+ v.reset(OpConst64)
+ } else {
+ v.reset(OpConst32)
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved slicemask not needed")
+ }
+ v.AuxInt = -1
+ }
+ case OpCtz8, OpCtz16, OpCtz32, OpCtz64:
+ // On some architectures, notably amd64, we can generate much better
+ // code for CtzNN if we know that the argument is non-zero.
+ // Capture that information here for use in arch-specific optimizations.
+ x := v.Args[0]
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ continue
+ }
+ if lim.umin > 0 || lim.min > 0 || lim.max < 0 {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v non-zero", v.Op)
+ }
+ v.Op = ctzNonZeroOp[v.Op]
+ }
+ case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64,
+ OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64,
+ OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64,
+ OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64:
+ // Check whether, for a >> b, we know that a is non-negative
+ // and b is at least the width of a minus one, so only copies of
+ // the (zero) sign bit remain. If so, the result is zero.
+ bits := 8 * v.Type.Size()
+ if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op)
+ }
+ switch bits {
+ case 64:
+ v.reset(OpConst64)
+ case 32:
+ v.reset(OpConst32)
+ case 16:
+ v.reset(OpConst16)
+ case 8:
+ v.reset(OpConst8)
+ default:
+ panic("unexpected integer size")
+ }
+ v.AuxInt = 0
+ continue // Be sure not to fallthrough - this is no longer OpRsh.
+ }
+ // If the Rsh hasn't been replaced with 0, still check if it is bounded.
+ fallthrough
+ case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64,
+ OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64,
+ OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64,
+ OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64,
+ OpRsh8Ux8, OpRsh8Ux16, OpRsh8Ux32, OpRsh8Ux64,
+ OpRsh16Ux8, OpRsh16Ux16, OpRsh16Ux32, OpRsh16Ux64,
+ OpRsh32Ux8, OpRsh32Ux16, OpRsh32Ux32, OpRsh32Ux64,
+ OpRsh64Ux8, OpRsh64Ux16, OpRsh64Ux32, OpRsh64Ux64:
+ // Check whether, for a shift of a by b, we know that b
+ // is strictly less than the number of bits in a.
+ by := v.Args[1]
+ lim, ok := ft.limits[by.ID]
+ if !ok {
+ continue
+ }
+ bits := 8 * v.Args[0].Type.Size()
+ if lim.umax < uint64(bits) || (lim.max < bits && ft.isNonNegative(by)) {
+ v.AuxInt = 1 // see shiftIsBounded
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op)
+ }
+ }
+ case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:
+ // On amd64 and 386 fix-up code can be avoided if we know
+ // the divisor is not -1 or the dividend > MinIntNN.
+ // Don't modify AuxInt on other architectures,
+ // as that can interfere with CSE.
+ // TODO: add other architectures?
+ if b.Func.Config.arch != "386" && b.Func.Config.arch != "amd64" {
+ break
+ }
+ divr := v.Args[1]
+ divrLim, divrLimok := ft.limits[divr.ID]
+ divd := v.Args[0]
+ divdLim, divdLimok := ft.limits[divd.ID]
+ if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||
+ (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {
+ // See DivisionNeedsFixUp in rewrite.go.
+ // v.AuxInt = 1 means we have proved both that the divisor is not -1
+ // and that the dividend is not the most negative integer,
+ // so we do not need to add fix-up code.
+ v.AuxInt = 1
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op)
+ }
+ }
+ }
+ }
+
+ if b.Kind != BlockIf {
+ return
+ }
+
+ // Consider outgoing edges from this block.
+ parent := b
+ for i, branch := range [...]branch{positive, negative} {
+ child := parent.Succs[i].b
+ if getBranch(sdom, parent, child) != unknown {
+ // For edges to uniquely dominated blocks, we
+ // already did this when we visited the child.
+ continue
+ }
+ // For edges to other blocks, this can trim a branch
+ // even if we couldn't get rid of the child itself.
+ ft.checkpoint()
+ addBranchRestrictions(ft, parent, branch)
+ unsat := ft.unsat
+ ft.restore()
+ if unsat {
+ // This branch is impossible, so remove it
+ // from the block.
+ removeBranch(parent, branch)
+ // No point in considering the other branch.
+ // (It *is* possible for both to be
+ // unsatisfiable since the fact table is
+ // incomplete. We could turn this into a
+ // BlockExit, but it doesn't seem worth it.)
+ break
+ }
+ }
+}
+
+func removeBranch(b *Block, branch branch) {
+ c := b.Controls[0]
+ if b.Func.pass.debug > 0 {
+ verb := "Proved"
+ if branch == positive {
+ verb = "Disproved"
+ }
+ if b.Func.pass.debug > 1 {
+ b.Func.Warnl(b.Pos, "%s %s (%s)", verb, c.Op, c)
+ } else {
+ b.Func.Warnl(b.Pos, "%s %s", verb, c.Op)
+ }
+ }
+ if c != nil && c.Pos.IsStmt() == src.PosIsStmt && c.Pos.SameFileAndLine(b.Pos) {
+ // attempt to preserve statement marker.
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ b.Kind = BlockFirst
+ b.ResetControls()
+ if branch == positive {
+ b.swapSuccessors()
+ }
+}
+
+// isNonNegative reports whether v is known to be greater or equal to zero.
+func isNonNegative(v *Value) bool {
+ if !v.Type.IsInteger() {
+ v.Fatalf("isNonNegative bad type: %v", v.Type)
+ }
+ // TODO: return true if !v.Type.IsSigned()
+ // SSA isn't type-safe enough to do that now (issue 37753).
+ // The checks below depend only on the pattern of bits.
+
+ switch v.Op {
+ case OpConst64:
+ return v.AuxInt >= 0
+
+ case OpConst32:
+ return int32(v.AuxInt) >= 0
+
+ case OpConst16:
+ return int16(v.AuxInt) >= 0
+
+ case OpConst8:
+ return int8(v.AuxInt) >= 0
+
+ case OpStringLen, OpSliceLen, OpSliceCap,
+ OpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64,
+ OpZeroExt8to32, OpZeroExt16to32, OpZeroExt8to16,
+ OpCtz64, OpCtz32, OpCtz16, OpCtz8:
+ return true
+
+ case OpRsh64Ux64, OpRsh32Ux64:
+ by := v.Args[1]
+ return by.Op == OpConst64 && by.AuxInt > 0
+
+ case OpRsh64x64, OpRsh32x64, OpRsh8x64, OpRsh16x64, OpRsh32x32, OpRsh64x32,
+ OpSignExt32to64, OpSignExt16to64, OpSignExt8to64, OpSignExt16to32, OpSignExt8to32:
+ return isNonNegative(v.Args[0])
+
+ case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
+ return isNonNegative(v.Args[0]) || isNonNegative(v.Args[1])
+
+ case OpMod64, OpMod32, OpMod16, OpMod8,
+ OpDiv64, OpDiv32, OpDiv16, OpDiv8,
+ OpOr64, OpOr32, OpOr16, OpOr8,
+ OpXor64, OpXor32, OpXor16, OpXor8:
+ return isNonNegative(v.Args[0]) && isNonNegative(v.Args[1])
+
+ // We could handle OpPhi here, but the improvements from doing
+ // so are very minor, and it is neither simple nor cheap.
+ }
+ return false
+}
+
+// isConstDelta returns non-nil if v is equivalent to w+delta (signed).
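+// For example, (Add64 x (Const64 [5])) yields (x, 5) and
+// (Sub32 x (Const32 [3])) yields (x, -3).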
+func isConstDelta(v *Value) (w *Value, delta int64) {
+ cop := OpConst64
+ switch v.Op {
+ case OpAdd32, OpSub32:
+ cop = OpConst32
+ }
+ switch v.Op {
+ case OpAdd64, OpAdd32:
+ if v.Args[0].Op == cop {
+ return v.Args[1], v.Args[0].AuxInt
+ }
+ if v.Args[1].Op == cop {
+ return v.Args[0], v.Args[1].AuxInt
+ }
+ case OpSub64, OpSub32:
+ if v.Args[1].Op == cop {
+ aux := v.Args[1].AuxInt
+ if aux != -aux { // skip the most negative value, whose negation overflows
+ return v.Args[0], -aux
+ }
+ }
+ }
+ return nil, 0
+}
+
+// isCleanExt reports whether v is the result of a value-preserving
+// sign or zero extension.
+func isCleanExt(v *Value) bool {
+ switch v.Op {
+ case OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
+ OpSignExt16to32, OpSignExt16to64, OpSignExt32to64:
+ // signed -> signed is the only value-preserving sign extension
+ return v.Args[0].Type.IsSigned() && v.Type.IsSigned()
+
+ case OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64,
+ OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64:
+ // unsigned -> signed/unsigned are value-preserving zero extensions
+ return !v.Args[0].Type.IsSigned()
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/redblack32.go b/src/cmd/compile/internal/ssa/redblack32.go
new file mode 100644
index 0000000..fc9cc71
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/redblack32.go
@@ -0,0 +1,429 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "fmt"
+
+const (
+ rankLeaf rbrank = 1
+ rankZero rbrank = 0
+)
+
+type rbrank int8
+
+// RBTint32 is a red-black tree with data stored at internal nodes,
+// following Tarjan, Data Structures and Network Algorithms,
+// pp 48-52, using explicit rank instead of red and black.
+// Deletion is not yet implemented because it is not yet needed.
+// Extra operations glb, lub, glbEq, lubEq are provided for
+// use in sparse lookup algorithms.
+type RBTint32 struct {
+ root *node32
+ // An extra-clever implementation will have special cases
+ // for small sets, but we are not extra-clever today.
+}
+
+func (t *RBTint32) String() string {
+ if t.root == nil {
+ return "[]"
+ }
+ return "[" + t.root.String() + "]"
+}
+
+func (t *node32) String() string {
+ s := ""
+ if t.left != nil {
+ s = t.left.String() + " "
+ }
+ s = s + fmt.Sprintf("k=%d,d=%v", t.key, t.data)
+ if t.right != nil {
+ s = s + " " + t.right.String()
+ }
+ return s
+}
+
+type node32 struct {
+ // Standard conventions hold for left = smaller, right = larger
+ left, right, parent *node32
+ data interface{}
+ key int32
+ rank rbrank // From Tarjan pp 48-49:
+ // If x is a node with a parent, then x.rank <= x.parent.rank <= x.rank+1.
+ // If x is a node with a grandparent, then x.rank < x.parent.parent.rank.
+ // If x is an "external [null] node", then x.rank = 0 && x.parent.rank = 1.
+ // Any node with one or more null children should have rank = 1.
+}
+
+// makeNode returns a new leaf node with the given key and nil data.
+func (t *RBTint32) makeNode(key int32) *node32 {
+ return &node32{key: key, rank: rankLeaf}
+}
+
+// IsEmpty reports whether t is empty.
+func (t *RBTint32) IsEmpty() bool {
+ return t.root == nil
+}
+
+// IsSingle reports whether t is a singleton (leaf).
+func (t *RBTint32) IsSingle() bool {
+ return t.root != nil && t.root.isLeaf()
+}
+
+// VisitInOrder applies f to the key and data pairs in t,
+// with keys ordered from smallest to largest.
+func (t *RBTint32) VisitInOrder(f func(int32, interface{})) {
+ if t.root == nil {
+ return
+ }
+ t.root.visitInOrder(f)
+}
+
+func (n *node32) Data() interface{} {
+ if n == nil {
+ return nil
+ }
+ return n.data
+}
+
+func (n *node32) keyAndData() (k int32, d interface{}) {
+ if n == nil {
+ k = 0
+ d = nil
+ } else {
+ k = n.key
+ d = n.data
+ }
+ return
+}
+
+func (n *node32) Rank() rbrank {
+ if n == nil {
+ return 0
+ }
+ return n.rank
+}
+
+// Find returns the data associated with key in the tree, or
+// nil if key is not in the tree.
+func (t *RBTint32) Find(key int32) interface{} {
+ return t.root.find(key).Data()
+}
+
+// Insert adds key to the tree and associates key with data.
+// If key was already in the tree, it updates the associated data.
+// Insert returns the previous data associated with key,
+// or nil if key was not present.
+// Insert panics if data is nil.
+func (t *RBTint32) Insert(key int32, data interface{}) interface{} {
+ if data == nil {
+ panic("Cannot insert nil data into tree")
+ }
+ n := t.root
+ var newroot *node32
+ if n == nil {
+ n = t.makeNode(key)
+ newroot = n
+ } else {
+ newroot, n = n.insert(key, t)
+ }
+ r := n.data
+ n.data = data
+ t.root = newroot
+ return r
+}
+
+// Min returns the minimum element of t and its associated data.
+// If t is empty, then (0, nil) is returned.
+func (t *RBTint32) Min() (k int32, d interface{}) {
+ return t.root.min().keyAndData()
+}
+
+// Max returns the maximum element of t and its associated data.
+// If t is empty, then (0, nil) is returned.
+func (t *RBTint32) Max() (k int32, d interface{}) {
+ return t.root.max().keyAndData()
+}
+
+// Glb returns the greatest-lower-bound-exclusive of x and its associated
+// data. If x has no glb in the tree, then (0, nil) is returned.
+func (t *RBTint32) Glb(x int32) (k int32, d interface{}) {
+ return t.root.glb(x, false).keyAndData()
+}
+
+// GlbEq returns the greatest-lower-bound-inclusive of x and its associated
+// data. If x has no glbEq in the tree, then (0, nil) is returned.
+func (t *RBTint32) GlbEq(x int32) (k int32, d interface{}) {
+ return t.root.glb(x, true).keyAndData()
+}
+
+// Lub returns the least-upper-bound-exclusive of x and its associated
+// data. If x has no lub in the tree, then (0, nil) is returned.
+func (t *RBTint32) Lub(x int32) (k int32, d interface{}) {
+ return t.root.lub(x, false).keyAndData()
+}
+
+// LubEq returns the least-upper-bound-inclusive of x and its associated
+// data. If x has no lubEq in the tree, then (0, nil) is returned.
+func (t *RBTint32) LubEq(x int32) (k int32, d interface{}) {
+ return t.root.lub(x, true).keyAndData()
+}
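+
+// As an illustrative sketch of the bound queries (not part of the API):
+// with keys 10 and 20 in the tree,
+//
+//	t.Glb(20)   // (10, <data for 10>): greatest key strictly below 20
+//	t.GlbEq(20) // (20, <data for 20>): greatest key <= 20
+//	t.Lub(10)   // (20, <data for 20>): least key strictly above 10
+//	t.LubEq(10) // (10, <data for 10>): least key >= 10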
+
+func (t *node32) isLeaf() bool {
+ return t.left == nil && t.right == nil
+}
+
+func (t *node32) visitInOrder(f func(int32, interface{})) {
+ if t.left != nil {
+ t.left.visitInOrder(f)
+ }
+ f(t.key, t.data)
+ if t.right != nil {
+ t.right.visitInOrder(f)
+ }
+}
+
+func (t *node32) maxChildRank() rbrank {
+ if t.left == nil {
+ if t.right == nil {
+ return rankZero
+ }
+ return t.right.rank
+ }
+ if t.right == nil {
+ return t.left.rank
+ }
+ if t.right.rank > t.left.rank {
+ return t.right.rank
+ }
+ return t.left.rank
+}
+
+func (t *node32) minChildRank() rbrank {
+ if t.left == nil || t.right == nil {
+ return rankZero
+ }
+ if t.right.rank < t.left.rank {
+ return t.right.rank
+ }
+ return t.left.rank
+}
+
+func (t *node32) find(key int32) *node32 {
+ for t != nil {
+ if key < t.key {
+ t = t.left
+ } else if key > t.key {
+ t = t.right
+ } else {
+ return t
+ }
+ }
+ return nil
+}
+
+func (t *node32) min() *node32 {
+ if t == nil {
+ return t
+ }
+ for t.left != nil {
+ t = t.left
+ }
+ return t
+}
+
+func (t *node32) max() *node32 {
+ if t == nil {
+ return t
+ }
+ for t.right != nil {
+ t = t.right
+ }
+ return t
+}
+
+func (t *node32) glb(key int32, allow_eq bool) *node32 {
+ var best *node32
+ for t != nil {
+ if key <= t.key {
+ if key == t.key && allow_eq {
+ return t
+ }
+ // t is too big, glb is to the left.
+ t = t.left
+ } else {
+ // t is a lower bound, record it and seek a better one.
+ best = t
+ t = t.right
+ }
+ }
+ return best
+}
+
+func (t *node32) lub(key int32, allow_eq bool) *node32 {
+ var best *node32
+ for t != nil {
+ if key >= t.key {
+ if key == t.key && allow_eq {
+ return t
+ }
+ // t is too small, lub is to the right.
+ t = t.right
+ } else {
+ // t is an upper bound, record it and seek a better one.
+ best = t
+ t = t.left
+ }
+ }
+ return best
+}
+
+func (t *node32) insert(x int32, w *RBTint32) (newroot, newnode *node32) {
+ // defaults
+ newroot = t
+ newnode = t
+ if x == t.key {
+ return
+ }
+ if x < t.key {
+ if t.left == nil {
+ n := w.makeNode(x)
+ n.parent = t
+ t.left = n
+ newnode = n
+ return
+ }
+ var new_l *node32
+ new_l, newnode = t.left.insert(x, w)
+ t.left = new_l
+ new_l.parent = t
+ newrank := 1 + new_l.maxChildRank()
+ if newrank > t.rank {
+ if newrank > 1+t.right.Rank() { // rotations required
+ if new_l.left.Rank() < new_l.right.Rank() {
+ // double rotation
+ t.left = new_l.rightToRoot()
+ }
+ newroot = t.leftToRoot()
+ return
+ } else {
+ t.rank = newrank
+ }
+ }
+ } else { // x > t.key
+ if t.right == nil {
+ n := w.makeNode(x)
+ n.parent = t
+ t.right = n
+ newnode = n
+ return
+ }
+ var new_r *node32
+ new_r, newnode = t.right.insert(x, w)
+ t.right = new_r
+ new_r.parent = t
+ newrank := 1 + new_r.maxChildRank()
+ if newrank > t.rank {
+ if newrank > 1+t.left.Rank() { // rotations required
+ if new_r.right.Rank() < new_r.left.Rank() {
+ // double rotation
+ t.right = new_r.leftToRoot()
+ }
+ newroot = t.rightToRoot()
+ return
+ } else {
+ t.rank = newrank
+ }
+ }
+ }
+ return
+}
+
+func (t *node32) rightToRoot() *node32 {
+ // this
+ // left right
+ // rl rr
+ //
+ // becomes
+ //
+ // right
+ // this rr
+ // left rl
+ //
+ right := t.right
+ rl := right.left
+ right.parent = t.parent
+ right.left = t
+ t.parent = right
+ // parent's child ptr fixed in caller
+ t.right = rl
+ if rl != nil {
+ rl.parent = t
+ }
+ return right
+}
+
+func (t *node32) leftToRoot() *node32 {
+ // this
+ // left right
+ // ll lr
+ //
+ // becomes
+ //
+ // left
+ // ll this
+ // lr right
+ //
+ left := t.left
+ lr := left.right
+ left.parent = t.parent
+ left.right = t
+ t.parent = left
+ // parent's child ptr fixed in caller
+ t.left = lr
+ if lr != nil {
+ lr.parent = t
+ }
+ return left
+}
+
+// next returns the successor of t in a left-to-right
+// walk of the tree in which t is embedded.
+func (t *node32) next() *node32 {
+ // If there is a right child, the successor is the smallest node in that subtree.
+ r := t.right
+ if r != nil {
+ return r.min()
+ }
+ // if t is p.left, then p, else repeat.
+ p := t.parent
+ for p != nil {
+ if p.left == t {
+ return p
+ }
+ t = p
+ p = t.parent
+ }
+ return nil
+}
+
+// prev returns the predecessor of t in a left-to-right
+// walk of the tree in which t is embedded.
+func (t *node32) prev() *node32 {
+ // If there is a left child, the predecessor is the largest node in that subtree.
+ l := t.left
+ if l != nil {
+ return l.max()
+ }
+ // if t is p.right, then p, else repeat.
+ p := t.parent
+ for p != nil {
+ if p.right == t {
+ return p
+ }
+ t = p
+ p = t.parent
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/ssa/redblack32_test.go b/src/cmd/compile/internal/ssa/redblack32_test.go
new file mode 100644
index 0000000..376e8cf
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/redblack32_test.go
@@ -0,0 +1,274 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "testing"
+)
+
+type sstring string
+
+func (s sstring) String() string {
+ return string(s)
+}
+
+// wellFormed ensures that a red-black tree meets
+// all of its invariants and returns a string identifying
+// the first problem encountered. If there is no problem
+// then the returned string is empty. The size is also
+// returned to allow comparison of calculated tree size
+// with expected.
+func (t *RBTint32) wellFormed() (s string, i int) {
+ if t.root == nil {
+ s = ""
+ i = 0
+ return
+ }
+ return t.root.wellFormedSubtree(nil, -0x80000000, 0x7fffffff)
+}
+
+// wellFormedSubtree ensures that a red-black subtree meets
+// all of its invariants and returns a string identifying
+// the first problem encountered. If there is no problem
+// then the returned string is empty. The size is also
+// returned to allow comparison of calculated tree size
+// with expected.
+func (t *node32) wellFormedSubtree(parent *node32, min, max int32) (s string, i int) {
+ i = -1 // initialize to a failing value
+ s = "" // s is the reason for failure; empty means okay.
+
+ if t.parent != parent {
+ s = "t.parent != parent"
+ return
+ }
+
+ if min >= t.key {
+ s = "min >= t.key"
+ return
+ }
+
+ if max <= t.key {
+ s = "max <= t.key"
+ return
+ }
+
+ l := t.left
+ r := t.right
+ if l == nil && r == nil {
+ if t.rank != rankLeaf {
+ s = "leaf rank wrong"
+ return
+ }
+ }
+ if l != nil {
+ if t.rank < l.rank {
+ s = "t.rank < l.rank"
+ } else if t.rank > 1+l.rank {
+ s = "t.rank > 1+l.rank"
+ } else if t.rank <= l.maxChildRank() {
+ s = "t.rank <= l.maxChildRank()"
+ } else if t.key <= l.key {
+ s = "t.key <= l.key"
+ }
+ if s != "" {
+ return
+ }
+ } else {
+ if t.rank != 1 {
+ s = "t w/ left nil has rank != 1"
+ return
+ }
+ }
+ if r != nil {
+ if t.rank < r.rank {
+ s = "t.rank < r.rank"
+ } else if t.rank > 1+r.rank {
+ s = "t.rank > 1+r.rank"
+ } else if t.rank <= r.maxChildRank() {
+ s = "t.rank <= r.maxChildRank()"
+ } else if t.key >= r.key {
+ s = "t.key >= r.key"
+ }
+ if s != "" {
+ return
+ }
+ } else {
+ if t.rank != 1 {
+ s = "t w/ right nil has rank != 1"
+ return
+ }
+ }
+ ii := 1
+ if l != nil {
+ res, il := l.wellFormedSubtree(t, min, t.key)
+ if res != "" {
+ s = "L." + res
+ return
+ }
+ ii += il
+ }
+ if r != nil {
+ res, ir := r.wellFormedSubtree(t, t.key, max)
+ if res != "" {
+ s = "R." + res
+ return
+ }
+ ii += ir
+ }
+ i = ii
+ return
+}
+
+func (t *RBTint32) DebugString() string {
+ if t.root == nil {
+ return ""
+ }
+ return t.root.DebugString()
+}
+
+// DebugString returns a string rendering of the tree with nesting
+// information to allow an eyeball check on the tree balance.
+func (t *node32) DebugString() string {
+ s := ""
+ if t.left != nil {
+ s += "["
+ s += t.left.DebugString()
+ s += "]"
+ }
+ s += fmt.Sprintf("%v=%v:%d", t.key, t.data, t.rank)
+ if t.right != nil {
+ s += "["
+ s += t.right.DebugString()
+ s += "]"
+ }
+ return s
+}
+
+func allRBT32Ops(te *testing.T, x []int32) {
+ t := &RBTint32{}
+ for i, d := range x {
+ x[i] = d + d // Double everything for glb/lub testing
+ }
+
+ // fmt.Printf("Inserting double of %v", x)
+ k := 0
+ min := int32(0x7fffffff)
+ max := int32(-0x80000000)
+ for _, d := range x {
+ if d < min {
+ min = d
+ }
+
+ if d > max {
+ max = d
+ }
+
+ t.Insert(d, sstring(fmt.Sprintf("%v", d)))
+ k++
+ s, i := t.wellFormed()
+ if i != k {
+ te.Errorf("Wrong tree size %v, expected %v for %v", i, k, t.DebugString())
+ }
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v", s)
+ return
+ }
+ }
+
+ oops := false
+
+ for _, d := range x {
+ s := fmt.Sprintf("%v", d)
+ f := t.Find(d)
+
+ // data
+ if s != fmt.Sprintf("%v", f) {
+ te.Errorf("s(%v) != f(%v)", s, f)
+ oops = true
+ }
+ }
+
+ if !oops {
+ for _, d := range x {
+ s := fmt.Sprintf("%v", d)
+
+ kg, g := t.Glb(d + 1)
+ kge, ge := t.GlbEq(d)
+ kl, l := t.Lub(d - 1)
+ kle, le := t.LubEq(d)
+
+ // keys
+ if d != kg {
+ te.Errorf("d(%v) != kg(%v)", d, kg)
+ }
+ if d != kl {
+ te.Errorf("d(%v) != kl(%v)", d, kl)
+ }
+ if d != kge {
+ te.Errorf("d(%v) != kge(%v)", d, kge)
+ }
+ if d != kle {
+ te.Errorf("d(%v) != kle(%v)", d, kle)
+ }
+ // data
+ if s != fmt.Sprintf("%v", g) {
+ te.Errorf("s(%v) != g(%v)", s, g)
+ }
+ if s != fmt.Sprintf("%v", l) {
+ te.Errorf("s(%v) != l(%v)", s, l)
+ }
+ if s != fmt.Sprintf("%v", ge) {
+ te.Errorf("s(%v) != ge(%v)", s, ge)
+ }
+ if s != fmt.Sprintf("%v", le) {
+ te.Errorf("s(%v) != le(%v)", s, le)
+ }
+ }
+
+ for _, d := range x {
+ s := fmt.Sprintf("%v", d)
+ kge, ge := t.GlbEq(d + 1)
+ kle, le := t.LubEq(d - 1)
+ if d != kge {
+ te.Errorf("d(%v) != kge(%v)", d, kge)
+ }
+ if d != kle {
+ te.Errorf("d(%v) != kle(%v)", d, kle)
+ }
+ if s != fmt.Sprintf("%v", ge) {
+ te.Errorf("s(%v) != ge(%v)", s, ge)
+ }
+ if s != fmt.Sprintf("%v", le) {
+ te.Errorf("s(%v) != le(%v)", s, le)
+ }
+ }
+
+ kg, g := t.Glb(min)
+ kge, ge := t.GlbEq(min - 1)
+ kl, l := t.Lub(max)
+ kle, le := t.LubEq(max + 1)
+ fmin := t.Find(min - 1)
+ fmax := t.Find(min + 11)
+
+ if kg != 0 || kge != 0 || kl != 0 || kle != 0 {
+ te.Errorf("Got non-zero-key for missing query")
+ }
+
+ if g != nil || ge != nil || l != nil || le != nil || fmin != nil || fmax != nil {
+ te.Errorf("Got non-error-data for missing query")
+ }
+
+ }
+}
+
+func TestAllRBTreeOps(t *testing.T) {
+ allRBT32Ops(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ allRBT32Ops(t, []int32{22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 3, 2, 1, 25, 24, 23, 12, 11, 10, 9, 8, 7, 6, 5, 4})
+ allRBT32Ops(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+ allRBT32Ops(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
new file mode 100644
index 0000000..0339b07
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -0,0 +1,2696 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Register allocation.
+//
+// We use a version of a linear scan register allocator. We treat the
+// whole function as a single long basic block and run through
+// it using a greedy register allocator. Then all merge edges
+// (those targeting a block with len(Preds)>1) are processed to
+// shuffle data into the place that the target of the edge expects.
+//
+// The greedy allocator moves values into registers just before they
+// are used, spills registers only when necessary, and spills the
+// value whose next use is farthest in the future.
+//
+// The register allocator requires that a block is not scheduled until
+// at least one of its predecessors has been scheduled. The most recent
+// such predecessor provides the starting register state for a block.
+//
+// It also requires that there are no critical edges (critical =
+// comes from a block with >1 successor and goes to a block with >1
+// predecessor). This makes it easy to add fixup code on merge edges -
+// the source of a merge edge has only one successor, so we can add
+// fixup code to the end of that block.
+
+// Spilling
+//
+// During the normal course of the allocator, we might throw a still-live
+// value out of all registers. When that value is subsequently used, we must
+// load it from a slot on the stack. We must also issue an instruction to
+// initialize that stack location with a copy of v.
+//
+// pre-regalloc:
+// (1) v = Op ...
+// (2) x = Op ...
+// (3) ... = Op v ...
+//
+// post-regalloc:
+// (1) v = Op ... : AX // computes v, store result in AX
+// s = StoreReg v // spill v to a stack slot
+// (2) x = Op ... : AX // some other op uses AX
+// c = LoadReg s : CX // restore v from stack slot
+// (3) ... = Op c ... // use the restored value
+//
+// Allocation occurs normally until we reach (3) and we realize we have
+// a use of v and it isn't in any register. At that point, we allocate
+// a spill (a StoreReg) for v. We can't determine the correct place for
+// the spill at this point, so we allocate the spill as blockless initially.
+// The restore is then generated to load v back into a register so it can
+// be used. Subsequent uses of v will use the restored value c instead.
+//
+// What remains is the question of where to schedule the spill.
+// During allocation, we keep track of the dominator of all restores of v.
+// The spill of v must dominate that block. The spill must also be issued at
+// a point where v is still in a register.
+//
+// To find the right place, start at b, the block which dominates all restores.
+// - If b is v.Block, then issue the spill right after v.
+// It is known to be in a register at that point, and dominates any restores.
+// - Otherwise, if v is in a register at the start of b,
+// put the spill of v at the start of b.
+// - Otherwise, set b = immediate dominator of b, and repeat.
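+//
+// For example (a sketch of the walk above, not an exhaustive description):
+// if v is computed in b1 and restored in b4 and b5, we start at the block
+// dominating both restores. If v is no longer in a register at the start of
+// that block, we keep walking up immediate dominators, in the worst case
+// reaching v.Block itself and issuing the spill right after v.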
+//
+// Phi values are special, as always. We define two kinds of phis, those
+// where the merge happens in a register (a "register" phi) and those where
+// the merge happens in a stack location (a "stack" phi).
+//
+// A register phi must have the phi and all of its inputs allocated to the
+// same register. Register phis are spilled similarly to regular ops.
+//
+// A stack phi must have the phi and all of its inputs allocated to the same
+// stack location. Stack phis start out life already spilled - each phi
+// input must be a store (using StoreReg) at the end of the corresponding
+// predecessor block.
+// b1: y = ... : AX b2: z = ... : BX
+// y2 = StoreReg y z2 = StoreReg z
+// goto b3 goto b3
+// b3: x = phi(y2, z2)
+// The stack allocator knows that StoreReg args of stack-allocated phis
+// must be allocated to the same stack slot as the phi that uses them.
+// x is now a spilled value and a restore must appear before its first use.
+
+// TODO
+
+// Use an affinity graph to mark two values which should use the
+// same register. This affinity graph will be used to prefer certain
+// registers for allocation. This affinity helps eliminate moves that
+// are required for phi implementations and helps generate allocations
+// for 2-register architectures.
+
+// Note: regalloc generates a not-quite-SSA output. If we have:
+//
+// b1: x = ... : AX
+// x2 = StoreReg x
+// ... AX gets reused for something else ...
+// if ... goto b3 else b4
+//
+// b3: x3 = LoadReg x2 : BX b4: x4 = LoadReg x2 : CX
+// ... use x3 ... ... use x4 ...
+//
+// b2: ... use x3 ...
+//
+// If b3 is the primary predecessor of b2, then we use x3 in b2 and
+// add a x4:CX->BX copy at the end of b4.
+// But the definition of x3 doesn't dominate b2. We should really
+// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// SSA form. For now, we ignore this problem as remaining in strict
+// SSA form isn't needed after regalloc. We'll just leave the use
+// of x3 not dominated by the definition of x3, and the CX->BX copy
+// will have no use (so don't run deadcode after regalloc!).
+// TODO: maybe we should introduce these extra phis?
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "fmt"
+ "math/bits"
+ "unsafe"
+)
+
+const (
+ moveSpills = iota
+ logSpills
+ regDebug
+ stackDebug
+)
+
+// distance is a measure of how far into the future values are used.
+// distance is measured in units of instructions.
+const (
+ likelyDistance = 1
+ normalDistance = 10
+ unlikelyDistance = 100
+)
+
+// regalloc performs register allocation on f. It sets f.RegAlloc
+// to the resulting allocation.
+func regalloc(f *Func) {
+ var s regAllocState
+ s.init(f)
+ s.regalloc(f)
+}
+
+type register uint8
+
+const noRegister register = 255
+
+// A regMask encodes a set of machine registers.
+// TODO: regMask -> regSet?
+type regMask uint64
+
+func (m regMask) String() string {
+ s := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if s != "" {
+ s += " "
+ }
+ s += fmt.Sprintf("r%d", r)
+ }
+ return s
+}
+
+func (s *regAllocState) RegMaskString(m regMask) string {
+ str := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if str != "" {
+ str += " "
+ }
+ str += s.registers[r].String()
+ }
+ return str
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ return bits.OnesCount64(uint64(r))
+}
+
+// pickReg picks an arbitrary register from the register mask.
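+// It picks the lowest-numbered set bit; for example (illustrative),
+// pickReg(0b0110) returns register 1.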
+func pickReg(r regMask) register {
+ if r == 0 {
+ panic("can't pick a register from an empty set")
+ }
+ // pick the lowest one
+ return register(bits.TrailingZeros64(uint64(r)))
+}
+
+type use struct {
+ dist int32 // distance from start of the block to a use of a value
+ pos src.XPos // source position of the use
+ next *use // linked list of uses of a value in nondecreasing dist order
+}
+
+// A valState records the register allocation state for a (pre-regalloc) value.
+type valState struct {
+ regs regMask // the set of registers holding a Value (usually just one)
+ uses *use // list of uses in this block
+ spill *Value // spilled copy of the Value (if any)
+ restoreMin int32 // minimum of all restores' blocks' sdom.entry
+ restoreMax int32 // maximum of all restores' blocks' sdom.exit
+ needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags()
+ rematerializeable bool // cached value of v.rematerializeable()
+}
+
+type regState struct {
+ v *Value // Original (preregalloc) Value stored in this register.
+ c *Value // A Value equal to v which is currently in a register. Might be v or a copy of it.
+ // If a register is unused, v==c==nil
+}
+
+type regAllocState struct {
+ f *Func
+
+ sdom SparseTree
+ registers []Register
+ numRegs register
+ SPReg register
+ SBReg register
+ GReg register
+ allocatable regMask
+
+ // for each block, its primary predecessor.
+ // A predecessor of b is primary if it is the closest
+ // predecessor that appears before b in the layout order.
+ // We record the index in the Preds list where the primary predecessor sits.
+ primary []int32
+
+ // live values at the end of each block. live[b.ID] is a list of value IDs
+ // which are live at the end of b, together with a count of how many instructions
+ // forward to the next use.
+ live [][]liveInfo
+ // desired register assignments at the end of each block.
+ // Note that this is a static map computed before allocation occurs. Dynamic
+ // register desires (from partially completed allocations) will trump
+ // this information.
+ desired []desiredState
+
+ // current state of each (preregalloc) Value
+ values []valState
+
+ // ID of SP, SB values
+ sp, sb ID
+
+ // For each Value, map from its value ID back to the
+ // preregalloc Value it was derived from.
+ orig []*Value
+
+ // current state of each register
+ regs []regState
+
+ // registers that contain values which can't be kicked out
+ nospill regMask
+
+ // mask of registers currently in use
+ used regMask
+
+ // mask of registers used in the current instruction
+ tmpused regMask
+
+ // current block we're working on
+ curBlock *Block
+
+ // cache of use records
+ freeUseRecords *use
+
+ // endRegs[blockid] is the register state at the end of each block.
+ // encoded as a set of endReg records.
+ endRegs [][]endReg
+
+ // startRegs[blockid] is the register state at the start of merge blocks.
+ // saved state does not include the state of phi ops in the block.
+ startRegs [][]startReg
+
+ // spillLive[blockid] is the set of live spills at the end of each block
+ spillLive [][]ID
+
+ // a set of copies we generated to move things around, and
+ // whether it is used in shuffle. Unused copies will be deleted.
+ copies map[*Value]bool
+
+ loopnest *loopnest
+
+ // choose a good order in which to visit blocks for allocation purposes.
+ visitOrder []*Block
+}
+
+type endReg struct {
+ r register
+ v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
+ c *Value // cached version of the value
+}
+
+type startReg struct {
+ r register
+ v *Value // pre-regalloc value needed in this register
+ c *Value // cached version of the value
+ pos src.XPos // source position of use of this register
+}
+
+// freeReg frees up register r. Any current user of r is kicked out.
+func (s *regAllocState) freeReg(r register) {
+ v := s.regs[r].v
+ if v == nil {
+ s.f.Fatalf("tried to free an already free register %d\n", r)
+ }
+
+ // Mark r as unused.
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c)
+ }
+ s.regs[r] = regState{}
+ s.values[v.ID].regs &^= regMask(1) << r
+ s.used &^= regMask(1) << r
+}
+
+// freeRegs frees up all registers listed in m.
+func (s *regAllocState) freeRegs(m regMask) {
+ for m&s.used != 0 {
+ s.freeReg(pickReg(m & s.used))
+ }
+}
+
+// setOrig records that c's original value is the same as
+// v's original value.
+func (s *regAllocState) setOrig(c *Value, v *Value) {
+ for int(c.ID) >= len(s.orig) {
+ s.orig = append(s.orig, nil)
+ }
+ if s.orig[c.ID] != nil {
+ s.f.Fatalf("orig value set twice %s %s", c, v)
+ }
+ s.orig[c.ID] = s.orig[v.ID]
+}
+
+// assignReg assigns register r to hold c, a copy of v.
+// r must be unused.
+func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c)
+ }
+ if s.regs[r].v != nil {
+ s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
+ }
+
+ // Update state.
+ s.regs[r] = regState{v, c}
+ s.values[v.ID].regs |= regMask(1) << r
+ s.used |= regMask(1) << r
+ s.f.setHome(c, &s.registers[r])
+}
+
+// allocReg chooses a register from the set of registers in mask.
+// If there is no unused register, a Value will be kicked out of
+// a register to make room.
+func (s *regAllocState) allocReg(mask regMask, v *Value) register {
+ if v.OnWasmStack {
+ return noRegister
+ }
+
+ mask &= s.allocatable
+ mask &^= s.nospill
+ if mask == 0 {
+ s.f.Fatalf("no register available for %s", v.LongString())
+ }
+
+ // Pick an unused register if one is available.
+ if mask&^s.used != 0 {
+ return pickReg(mask &^ s.used)
+ }
+
+ // Pick a value to spill. Spill the value with the
+ // farthest-in-the-future use.
+ // TODO: Prefer registers with already spilled Values?
+ // TODO: Modify preference using affinity graph.
+ // TODO: if a single value is in multiple registers, spill one of them
+ // before spilling a value in just a single register.
+
+ // Find a register to spill. We spill the register containing the value
+ // whose next use is as far in the future as possible.
+ // https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
+ var r register
+ maxuse := int32(-1)
+ for t := register(0); t < s.numRegs; t++ {
+ if mask>>t&1 == 0 {
+ continue
+ }
+ v := s.regs[t].v
+ if n := s.values[v.ID].uses.dist; n > maxuse {
+ // v's next use is farther in the future than any value
+ // we've seen so far. A new best spill candidate.
+ r = t
+ maxuse = n
+ }
+ }
+ if maxuse == -1 {
+ s.f.Fatalf("couldn't find register to spill")
+ }
+
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ // TODO(neelance): In theory this should never happen, because all wasm registers are equal.
+ // So if there is still a free register, the allocation should have picked that one in the first place instead of
+ // trying to kick some other value out. In practice, this case does happen and it breaks the stack optimization.
+ s.freeReg(r)
+ return r
+ }
+
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy and record it. It will be deleted if never used.
+ v2 := s.regs[r].v
+ m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r)
+ if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 {
+ r2 := pickReg(m)
+ c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2])
+ }
+ s.setOrig(c, v2)
+ s.assignReg(r2, v2, c)
+ }
+ s.freeReg(r)
+ return r
+}
+
+// makeSpill returns a Value which represents the spilled value of v.
+// b is the block in which the spill is used.
+func (s *regAllocState) makeSpill(v *Value, b *Block) *Value {
+ vi := &s.values[v.ID]
+ if vi.spill != nil {
+ // Final block not known - keep track of subtree where restores reside.
+ vi.restoreMin = min32(vi.restoreMin, s.sdom[b.ID].entry)
+ vi.restoreMax = max32(vi.restoreMax, s.sdom[b.ID].exit)
+ return vi.spill
+ }
+ // Make a spill for v. We don't know where we want
+ // to put it yet, so we leave it blockless for now.
+ spill := s.f.newValueNoBlock(OpStoreReg, v.Type, v.Pos)
+ // We also don't know what the spill's arg will be.
+ // Leave it argless for now.
+ s.setOrig(spill, v)
+ vi.spill = spill
+ vi.restoreMin = s.sdom[b.ID].entry
+ vi.restoreMax = s.sdom[b.ID].exit
+ return spill
+}
+
+// allocValToReg allocates v to a register selected from regMask and
+// returns the register copy of v. Any previous user is kicked out and spilled
+// (if necessary). Load code is added at the current pc. If nospill is set the
+// allocated register is marked nospill so the assignment cannot be
+// undone until the caller allows it by clearing nospill. Returns a
+// *Value which is either v or a copy of v allocated to the chosen register.
+func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos src.XPos) *Value {
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm && v.rematerializeable() {
+ c := v.copyIntoWithXPos(s.curBlock, pos)
+ c.OnWasmStack = true
+ s.setOrig(c, v)
+ return c
+ }
+ if v.OnWasmStack {
+ return v
+ }
+
+ vi := &s.values[v.ID]
+ pos = pos.WithNotStmt()
+ // Check if v is already in a requested register.
+ if mask&vi.regs != 0 {
+ r := pickReg(mask & vi.regs)
+ if s.regs[r].v != v || s.regs[r].c == nil {
+ panic("bad register state")
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ return s.regs[r].c
+ }
+
+ var r register
+ // If nospill is set, the value is used immediately, so it can live on the WebAssembly stack.
+ onWasmStack := nospill && s.f.Config.ctxt.Arch.Arch == sys.ArchWasm
+ if !onWasmStack {
+ // Allocate a register.
+ r = s.allocReg(mask, v)
+ }
+
+ // Allocate v to the new register.
+ var c *Value
+ if vi.regs != 0 {
+ // Copy from a register that v is already in.
+ r2 := pickReg(vi.regs)
+ if s.regs[r2].v != v {
+ panic("bad register state")
+ }
+ c = s.curBlock.NewValue1(pos, OpCopy, v.Type, s.regs[r2].c)
+ } else if v.rematerializeable() {
+ // Rematerialize instead of loading from the spill location.
+ c = v.copyIntoWithXPos(s.curBlock, pos)
+ } else {
+ // Load v from its spill location.
+ spill := s.makeSpill(v, s.curBlock)
+ if s.f.pass.debug > logSpills {
+ s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill)
+ }
+ c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill)
+ }
+
+ s.setOrig(c, v)
+
+ if onWasmStack {
+ c.OnWasmStack = true
+ return c
+ }
+
+ s.assignReg(r, v, c)
+ if c.Op == OpLoadReg && s.isGReg(r) {
+ s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString())
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ return c
+}
+
+// isLeaf reports whether f performs no calls, i.e. whether it is a leaf function.
+func isLeaf(f *Func) bool {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].call {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (s *regAllocState) init(f *Func) {
+ s.f = f
+ s.f.RegAlloc = s.f.Cache.locs[:0]
+ s.registers = f.Config.registers
+ if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
+ s.f.Fatalf("bad number of registers: %d", nr)
+ } else {
+ s.numRegs = register(nr)
+ }
+ // Locate SP, SB, and g registers.
+ s.SPReg = noRegister
+ s.SBReg = noRegister
+ s.GReg = noRegister
+ for r := register(0); r < s.numRegs; r++ {
+ switch s.registers[r].String() {
+ case "SP":
+ s.SPReg = r
+ case "SB":
+ s.SBReg = r
+ case "g":
+ s.GReg = r
+ }
+ }
+ // Make sure we found all required registers.
+ switch noRegister {
+ case s.SPReg:
+ s.f.Fatalf("no SP register found")
+ case s.SBReg:
+ s.f.Fatalf("no SB register found")
+ case s.GReg:
+ if f.Config.hasGReg {
+ s.f.Fatalf("no g register found")
+ }
+ }
+
+ // Figure out which registers we're allowed to use.
+ s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
+ s.allocatable &^= 1 << s.SPReg
+ s.allocatable &^= 1 << s.SBReg
+ if s.f.Config.hasGReg {
+ s.allocatable &^= 1 << s.GReg
+ }
+ if objabi.Framepointer_enabled && s.f.Config.FPReg >= 0 {
+ s.allocatable &^= 1 << uint(s.f.Config.FPReg)
+ }
+ if s.f.Config.LinkReg != -1 {
+ if isLeaf(f) {
+ // Leaf functions don't save/restore the link register.
+ s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
+ }
+ if s.f.Config.arch == "arm" && objabi.GOARM == 5 {
+ // On ARMv5 we insert softfloat calls at each FP instruction.
+ // This clobbers LR almost everywhere. Disable allocating LR
+ // on ARMv5.
+ s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
+ }
+ }
+ if s.f.Config.ctxt.Flag_dynlink {
+ switch s.f.Config.arch {
+ case "amd64":
+ s.allocatable &^= 1 << 15 // R15
+ case "arm":
+ s.allocatable &^= 1 << 9 // R9
+ case "ppc64le": // R2 already reserved.
+ // nothing to do
+ case "arm64":
+ // nothing to do?
+ case "386":
+ // nothing to do.
+ // Note that for Flag_shared (position independent code)
+ // we do need to be careful, but that carefulness is hidden
+ // in the rewrite rules so we always have a free register
+ // available for global load/stores. See gen/386.rules (search for Flag_shared).
+ case "s390x":
+ s.allocatable &^= 1 << 11 // R11
+ default:
+ s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
+ }
+ }
+
+ // Linear scan register allocation can be influenced by the order in which blocks appear.
+ // Decouple the register allocation order from the generated block order.
+ // This also creates an opportunity for experiments to find a better order.
+ s.visitOrder = layoutRegallocOrder(f)
+
+ // Compute block order. This array allows us to distinguish forward edges
+ // from backward edges and compute how far they go.
+ blockOrder := make([]int32, f.NumBlocks())
+ for i, b := range s.visitOrder {
+ blockOrder[b.ID] = int32(i)
+ }
+
+ s.regs = make([]regState, s.numRegs)
+ nv := f.NumValues()
+ if cap(s.f.Cache.regallocValues) >= nv {
+ s.f.Cache.regallocValues = s.f.Cache.regallocValues[:nv]
+ } else {
+ s.f.Cache.regallocValues = make([]valState, nv)
+ }
+ s.values = s.f.Cache.regallocValues
+ s.orig = make([]*Value, nv)
+ s.copies = make(map[*Value]bool)
+ for _, b := range s.visitOrder {
+ for _, v := range b.Values {
+ if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
+ s.values[v.ID].needReg = true
+ s.values[v.ID].rematerializeable = v.rematerializeable()
+ s.orig[v.ID] = v
+ }
+ // Note: needReg is false for values returning Tuple types.
+ // Instead, we mark the corresponding Selects as needReg.
+ }
+ }
+ s.computeLive()
+
+ // Compute primary predecessors.
+ s.primary = make([]int32, f.NumBlocks())
+ for _, b := range s.visitOrder {
+ best := -1
+ for i, e := range b.Preds {
+ p := e.b
+ if blockOrder[p.ID] >= blockOrder[b.ID] {
+ continue // backward edge
+ }
+ if best == -1 || blockOrder[p.ID] > blockOrder[b.Preds[best].b.ID] {
+ best = i
+ }
+ }
+ s.primary[b.ID] = int32(best)
+ }
+
+ s.endRegs = make([][]endReg, f.NumBlocks())
+ s.startRegs = make([][]startReg, f.NumBlocks())
+ s.spillLive = make([][]ID, f.NumBlocks())
+ s.sdom = f.Sdom()
+
+ // wasm: Mark instructions that can be optimized to have their values only on the WebAssembly stack.
+ if f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ canLiveOnStack := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(canLiveOnStack)
+ for _, b := range f.Blocks {
+ // New block. Clear candidate set.
+ canLiveOnStack.clear()
+ for _, c := range b.ControlValues() {
+ if c.Uses == 1 && !opcodeTable[c.Op].generic {
+ canLiveOnStack.add(c.ID)
+ }
+ }
+ // Walking backwards.
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if canLiveOnStack.contains(v.ID) {
+ v.OnWasmStack = true
+ } else {
+ // Value can not live on stack. Values are not allowed to be reordered, so clear candidate set.
+ canLiveOnStack.clear()
+ }
+ for _, arg := range v.Args {
+ // Value can live on the stack if:
+ // - it is only used once
+ // - it is used in the same basic block
+ // - it is not a "mem" value
+ // - it is a WebAssembly op
+ if arg.Uses == 1 && arg.Block == v.Block && !arg.Type.IsMemory() && !opcodeTable[arg.Op].generic {
+ canLiveOnStack.add(arg.ID)
+ }
+ }
+ }
+ }
+ }
+}
+
+// Adds a use record for id at distance dist from the start of the block.
+// All calls to addUse must happen with nonincreasing dist.
+func (s *regAllocState) addUse(id ID, dist int32, pos src.XPos) {
+ r := s.freeUseRecords
+ if r != nil {
+ s.freeUseRecords = r.next
+ } else {
+ r = &use{}
+ }
+ r.dist = dist
+ r.pos = pos
+ r.next = s.values[id].uses
+ s.values[id].uses = r
+ if r.next != nil && dist > r.next.dist {
+ s.f.Fatalf("uses added in wrong order")
+ }
+}
+
+// advanceUses advances the uses of v's args from the state before v to the state after v.
+// Any values which have no more uses are deallocated from registers.
+func (s *regAllocState) advanceUses(v *Value) {
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ ai := &s.values[a.ID]
+ r := ai.uses
+ ai.uses = r.next
+ if r.next == nil {
+ // Value is dead, free all registers that hold it.
+ s.freeRegs(ai.regs)
+ }
+ r.next = s.freeUseRecords
+ s.freeUseRecords = r
+ }
+}
+
+// liveAfterCurrentInstruction reports whether v is live after
+// the current instruction is completed. v must be used by the
+// current instruction.
+func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
+ u := s.values[v.ID].uses
+ d := u.dist
+ for u != nil && u.dist == d {
+ u = u.next
+ }
+ return u != nil && u.dist > d
+}
+
+// Sets the state of the registers to that encoded in regs.
+func (s *regAllocState) setState(regs []endReg) {
+ s.freeRegs(s.used)
+ for _, x := range regs {
+ s.assignReg(x.r, x.v, x.c)
+ }
+}
+
+// compatRegs returns the set of registers which can store a type t.
+func (s *regAllocState) compatRegs(t *types.Type) regMask {
+ var m regMask
+ if t.IsTuple() || t.IsFlags() {
+ return 0
+ }
+ if t.IsFloat() || t == types.TypeInt128 {
+ if t.Etype == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+ m = s.f.Config.fp32RegMask
+ } else if t.Etype == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+ m = s.f.Config.fp64RegMask
+ } else {
+ m = s.f.Config.fpRegMask
+ }
+ } else {
+ m = s.f.Config.gpRegMask
+ }
+ return m & s.allocatable
+}
+
+// regspec returns the regInfo for operation op.
+func (s *regAllocState) regspec(op Op) regInfo {
+ if op == OpConvert {
+ // OpConvert is a generic op, so it doesn't have a
+ // register set in the static table. It can use any
+ // allocatable integer register.
+ m := s.allocatable & s.f.Config.gpRegMask
+ return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}}
+ }
+ return opcodeTable[op].reg
+}
+
+func (s *regAllocState) isGReg(r register) bool {
+ return s.f.Config.hasGReg && s.GReg == r
+}
+
+func (s *regAllocState) regalloc(f *Func) {
+ regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register
+ defer f.retSparseSet(regValLiveSet)
+ var oldSched []*Value
+ var phis []*Value
+ var phiRegs []register
+ var args []*Value
+
+ // Data structure used for computing desired registers.
+ var desired desiredState
+
+ // Desired registers for inputs & outputs for each instruction in the block.
+ type dentry struct {
+ out [4]register // desired output registers
+ in [3][4]register // desired input registers (for inputs 0,1, and 2)
+ }
+ var dinfo []dentry
+
+ if f.Entry != f.Blocks[0] {
+ f.Fatalf("entry block must be first")
+ }
+
+ for _, b := range s.visitOrder {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("Begin processing block %v\n", b)
+ }
+ s.curBlock = b
+
+ // Initialize regValLiveSet and uses fields for this block.
+ // Walk backwards through the block doing liveness analysis.
+ regValLiveSet.clear()
+ for _, e := range s.live[b.ID] {
+ s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block
+ regValLiveSet.add(e.ID)
+ }
+ for _, v := range b.ControlValues() {
+ if s.values[v.ID].needReg {
+ s.addUse(v.ID, int32(len(b.Values)), b.Pos) // pseudo-use by control values
+ regValLiveSet.add(v.ID)
+ }
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ regValLiveSet.remove(v.ID)
+ if v.Op == OpPhi {
+ // Remove v from the live set, but don't add
+ // any inputs. This is the state the len(b.Preds)>1
+ // case below desires; it wants to process phis specially.
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ // Function call clobbers all the registers but SP and SB.
+ regValLiveSet.clear()
+ if s.sp != 0 && s.values[s.sp].uses != nil {
+ regValLiveSet.add(s.sp)
+ }
+ if s.sb != 0 && s.values[s.sb].uses != nil {
+ regValLiveSet.add(s.sb)
+ }
+ }
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ s.addUse(a.ID, int32(i), v.Pos)
+ regValLiveSet.add(a.ID)
+ }
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("use distances for %s\n", b)
+ for i := range s.values {
+ vi := &s.values[i]
+ u := vi.uses
+ if u == nil {
+ continue
+ }
+ fmt.Printf(" v%d:", i)
+ for u != nil {
+ fmt.Printf(" %d", u.dist)
+ u = u.next
+ }
+ fmt.Println()
+ }
+ }
+
+ // Make a copy of the block schedule so we can generate a new one in place.
+ // We make a separate copy for phis and regular values.
+ nphi := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ nphi++
+ }
+ phis = append(phis[:0], b.Values[:nphi]...)
+ oldSched = append(oldSched[:0], b.Values[nphi:]...)
+ b.Values = b.Values[:0]
+
+ // Initialize start state of block.
+ if b == f.Entry {
+ // Regalloc state is empty to start.
+ if nphi > 0 {
+ f.Fatalf("phis in entry block")
+ }
+ } else if len(b.Preds) == 1 {
+ // Start regalloc state with the end state of the previous block.
+ s.setState(s.endRegs[b.Preds[0].b.ID])
+ if nphi > 0 {
+ f.Fatalf("phis in single-predecessor block")
+ }
+ // Drop any values which are no longer live.
+ // This may happen because at the end of p, a value may be
+ // live but only used by some other successor of p.
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+ } else {
+ // This is the complicated case. We have more than one predecessor,
+ // which means we may have Phi ops.
+
+ // Start with the final register state of the primary predecessor
+ idx := s.primary[b.ID]
+ if idx < 0 {
+ f.Fatalf("block with no primary predecessor %s", b)
+ }
+ p := b.Preds[idx].b
+ s.setState(s.endRegs[p.ID])
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
+ for _, x := range s.endRegs[p.ID] {
+ fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c)
+ }
+ }
+
+ // Decide on registers for phi ops. Use the registers determined
+ // by the primary predecessor if we can.
+ // TODO: pick best of (already processed) predecessors?
+ // Majority vote? Deepest nesting level?
+ phiRegs = phiRegs[:0]
+ var phiUsed regMask
+
+ for _, v := range phis {
+ if !s.values[v.ID].needReg {
+ phiRegs = append(phiRegs, noRegister)
+ continue
+ }
+ a := v.Args[idx]
+ // Some instructions target not-allocatable registers.
+ // They're not suitable for further (phi-function) allocation.
+ m := s.values[a.ID].regs &^ phiUsed & s.allocatable
+ if m != 0 {
+ r := pickReg(m)
+ phiUsed |= regMask(1) << r
+ phiRegs = append(phiRegs, r)
+ } else {
+ phiRegs = append(phiRegs, noRegister)
+ }
+ }
+
+ // Second pass - deallocate all in-register phi inputs.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ a := v.Args[idx]
+ r := phiRegs[i]
+ if r == noRegister {
+ continue
+ }
+ if regValLiveSet.contains(a.ID) {
+ // Input value is still live (it is used by something other than Phi).
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy in the predecessor block and record it. It will be
+ // deleted later if never used.
+ //
+ // Pick a free register. At this point some registers used in the predecessor
+ // block may have been deallocated. Those are the ones used for Phis. Exclude
+ // them (and they are not going to be helpful anyway).
+ m := s.compatRegs(a.Type) &^ s.used &^ phiUsed
+ if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 {
+ r2 := pickReg(m)
+ c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2])
+ }
+ s.setOrig(c, a)
+ s.assignReg(r2, a, c)
+ s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c})
+ }
+ }
+ s.freeReg(r)
+ }
+
+ // Copy phi ops into new schedule.
+ b.Values = append(b.Values, phis...)
+
+ // Third pass - pick registers for phis whose input
+ // was not in a register in the primary predecessor.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if phiRegs[i] != noRegister {
+ continue
+ }
+ m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
+ // If one of the other inputs of v is in a register, and the register is available,
+ // select this register, which can save some unnecessary copies.
+ for i, pe := range b.Preds {
+ if int32(i) == idx {
+ continue
+ }
+ ri := noRegister
+ for _, er := range s.endRegs[pe.b.ID] {
+ if er.v == s.orig[v.Args[i].ID] {
+ ri = er.r
+ break
+ }
+ }
+ if ri != noRegister && m>>ri&1 != 0 {
+ m = regMask(1) << ri
+ break
+ }
+ }
+ if m != 0 {
+ r := pickReg(m)
+ phiRegs[i] = r
+ phiUsed |= regMask(1) << r
+ }
+ }
+
+ // Set registers for phis. Add phi spill code.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ r := phiRegs[i]
+ if r == noRegister {
+ // stack-based phi
+ // Spills will be inserted in all the predecessors below.
+ s.values[v.ID].spill = v // v starts life spilled
+ continue
+ }
+ // register-based phi
+ s.assignReg(r, v, v)
+ }
+
+ // Deallocate any values which are no longer live. Phis are excluded.
+ for r := register(0); r < s.numRegs; r++ {
+ if phiUsed>>r&1 != 0 {
+ continue
+ }
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+
+ // Save the starting state for use by merge edges.
+ // We append to a stack allocated variable that we'll
+ // later copy into s.startRegs in one fell swoop, to save
+ // on allocations.
+ regList := make([]startReg, 0, 32)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if phiUsed>>r&1 != 0 {
+ // Skip registers that phis used; we'll handle those
+ // specially during merge edge processing.
+ continue
+ }
+ regList = append(regList, startReg{r, v, s.regs[r].c, s.values[v.ID].uses.pos})
+ }
+ s.startRegs[b.ID] = make([]startReg, len(regList))
+ copy(s.startRegs[b.ID], regList)
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("after phis\n")
+ for _, x := range s.startRegs[b.ID] {
+ fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID)
+ }
+ }
+ }
+
+ // Allocate space to record the desired registers for each value.
+ if l := len(oldSched); cap(dinfo) < l {
+ dinfo = make([]dentry, l)
+ } else {
+ dinfo = dinfo[:l]
+ for i := range dinfo {
+ dinfo[i] = dentry{}
+ }
+ }
+
+ // Load static desired register info at the end of the block.
+ desired.copy(&s.desired[b.ID])
+
+ // Check actual assigned registers at the start of the next block(s).
+ // Dynamically assigned registers will trump the static
+ // desired registers computed during liveness analysis.
+ // Note that we do this phase after startRegs is set above, so that
+ // we get the right behavior for a block which branches to itself.
+ for _, e := range b.Succs {
+ succ := e.b
+ // TODO: prioritize likely successor?
+ for _, x := range s.startRegs[succ.ID] {
+ desired.add(x.v.ID, x.r)
+ }
+ // Process phi ops in succ.
+ pidx := e.i
+ for _, v := range succ.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ rp, ok := s.f.getHome(v.ID).(*Register)
+ if !ok {
+ // If v is not assigned a register, pick a register assigned to one of v's inputs.
+ // Hopefully v will get assigned that register later.
+ // If the inputs have allocated register information, add it to desired,
+ // which may reduce spill or copy operations when the register is available.
+ for _, a := range v.Args {
+ rp, ok = s.f.getHome(a.ID).(*Register)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+ desired.add(v.Args[pidx].ID, register(rp.num))
+ }
+ }
+ // Walk values backwards computing desired register info.
+ // See computeLive for more comments.
+ for i := len(oldSched) - 1; i >= 0; i-- {
+ v := oldSched[i]
+ prefs := desired.remove(v.ID)
+ regspec := s.regspec(v.Op)
+ desired.clobber(regspec.clobbers)
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ if opcodeTable[v.Op].resultInArg0 {
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ // Save desired registers for this value.
+ dinfo[i].out = prefs
+ for j, a := range v.Args {
+ if j >= len(dinfo[i].in) {
+ break
+ }
+ dinfo[i].in[j] = desired.get(a.ID)
+ }
+ }
+
+ // Process all the non-phi values.
+ for idx, v := range oldSched {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing %s\n", v.LongString())
+ }
+ regspec := s.regspec(v.Op)
+ if v.Op == OpPhi {
+ f.Fatalf("phi %s not at start of block", v)
+ }
+ if v.Op == OpSP {
+ s.assignReg(s.SPReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sp = v.ID
+ continue
+ }
+ if v.Op == OpSB {
+ s.assignReg(s.SBReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sb = v.ID
+ continue
+ }
+ if v.Op == OpSelect0 || v.Op == OpSelect1 {
+ if s.values[v.ID].needReg {
+ var i = 0
+ if v.Op == OpSelect1 {
+ i = 1
+ }
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
+ }
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
+ if v.Op == OpGetG && s.f.Config.hasGReg {
+ // use hardware g register
+ if s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // kick out the old value
+ }
+ s.assignReg(s.GReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
+ if v.Op == OpArg {
+ // Args are "pre-spilled" values. We don't allocate
+ // any register here. We just set the value's spill pointer to
+ // point at the Arg itself; any later user will restore from it.
+ s.values[v.ID].spill = v
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+ if v.Op == OpKeepAlive {
+ // Make sure the argument to v is still live here.
+ s.advanceUses(v)
+ a := v.Args[0]
+ vi := &s.values[a.ID]
+ if vi.regs == 0 && !vi.rematerializeable {
+ // Use the spill location.
+ // This forces later liveness analysis to make the
+ // value live at this point.
+ v.SetArg(0, s.makeSpill(a, b))
+ } else if _, ok := a.Aux.(GCNode); ok && vi.rematerializeable {
+ // Rematerializeable value with a gc.Node. This is the address of
+ // a stack object (e.g. an LEAQ). Keep the object live.
+ // Change it to VarLive, which is what plive expects for locals.
+ v.Op = OpVarLive
+ v.SetArgs1(v.Args[1])
+ v.Aux = a.Aux
+ } else {
+ // In-register and rematerializeable values are already live.
+ // These are typically rematerializeable constants like nil,
+ // or values of a variable that were modified since the last call.
+ v.Op = OpCopy
+ v.SetArgs1(v.Args[1])
+ }
+ b.Values = append(b.Values, v)
+ continue
+ }
+ if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
+ // No register allocation required (or none specified yet)
+ s.freeRegs(regspec.clobbers)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.values[v.ID].rematerializeable {
+ // Value is rematerializeable, don't issue it here.
+ // It will get issued just before each use (see
+ // allocValueToReg).
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("value %s\n", v.LongString())
+ fmt.Printf(" out:")
+ for _, r := range dinfo[idx].out {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ for i := 0; i < len(v.Args) && i < 3; i++ {
+ fmt.Printf(" in%d:", i)
+ for _, r := range dinfo[idx].in[i] {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ }
+ }
+
+ // Move arguments to registers. Process in an ordering defined
+ // by the register specification (most constrained first).
+ args = append(args[:0], v.Args...)
+ for _, i := range regspec.inputs {
+ mask := i.regs
+ if mask&s.values[args[i.idx].ID].regs == 0 {
+ // Need a new register for the input.
+ mask &= s.allocatable
+ mask &^= s.nospill
+ // Use the desired register if available.
+ if i.idx < 3 {
+ for _, r := range dinfo[idx].in[i.idx] {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid != 0 {
+ mask &^= desired.avoid
+ }
+ }
+ args[i.idx] = s.allocValToReg(args[i.idx], mask, true, v.Pos)
+ }
+
+ // If the output clobbers the input register, make sure we have
+ // at least two in-register copies of the input value so we don't
+ // have to reload it from the spill location.
+ if opcodeTable[v.Op].resultInArg0 {
+ var m regMask
+ if !s.liveAfterCurrentInstruction(v.Args[0]) {
+ // arg0 is dead. We can clobber its register.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if s.values[v.Args[0].ID].rematerializeable {
+ // We can rematerialize the input, don't worry about clobbering it.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
+ // we have at least 2 copies of arg0. We can afford to clobber one.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+
+ // We can't overwrite arg0 (or arg1, if commutative). So we
+ // need to make a copy of an input so we have a register we can modify.
+
+ // Possible new registers to copy into.
+ m = s.compatRegs(v.Args[0].Type) &^ s.used
+ if m == 0 {
+ // No free registers. In this case we'll just clobber
+ // an input and future uses of that input must use a restore.
+ // TODO(khr): We should really do this like allocReg does it,
+ // spilling the value with the most distant next use.
+ goto ok
+ }
+
+ // Try to move an input to the desired output.
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos)
+ // Note: we update args[0] so the instruction will
+ // use the register copy we just made.
+ goto ok
+ }
+ }
+ // Try to copy input to its desired location & use its old
+ // location as the result register.
+ for _, r := range dinfo[idx].in[0] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+ // Note: no update to args[0] so the instruction will
+ // use the original copy.
+ goto ok
+ }
+ }
+ if opcodeTable[v.Op].commutative {
+ for _, r := range dinfo[idx].in[1] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[1], m, true, v.Pos)
+ s.copies[c] = false
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ }
+ }
+ // Avoid future fixed uses if we can.
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ // Save input 0 to a new register so we can clobber it.
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+ }
+
+ ok:
+ // Now that all args are in regs, we're ready to issue the value itself.
+ // Before we pick a register for the output value, allow input registers
+ // to be deallocated. We do this here so that the output can use the
+ // same register as a dying input.
+ if !opcodeTable[v.Op].resultNotInArgs {
+ s.tmpused = s.nospill
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+
+ // Dump any registers which will be clobbered
+ s.freeRegs(regspec.clobbers)
+ s.tmpused |= regspec.clobbers
+
+ // Pick registers for outputs.
+ {
+ outRegs := [2]register{noRegister, noRegister}
+ var used regMask
+ for _, out := range regspec.outputs {
+ mask := out.regs & s.allocatable &^ used
+ if mask == 0 {
+ continue
+ }
+ if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
+ if !opcodeTable[v.Op].commutative {
+ // Output must use the same register as input 0.
+ r := register(s.f.getHome(args[0].ID).(*Register).num)
+ mask = regMask(1) << r
+ } else {
+ // Output must use the same register as input 0 or 1.
+ r0 := register(s.f.getHome(args[0].ID).(*Register).num)
+ r1 := register(s.f.getHome(args[1].ID).(*Register).num)
+ // Check r0 and r1 for desired output register.
+ found := false
+ for _, r := range dinfo[idx].out {
+ if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
+ mask = regMask(1) << r
+ found = true
+ if r == r1 {
+ args[0], args[1] = args[1], args[0]
+ }
+ break
+ }
+ }
+ if !found {
+ // Neither is desired; pick r0.
+ mask = regMask(1) << r0
+ }
+ }
+ }
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid&^s.nospill != 0 {
+ mask &^= desired.avoid
+ }
+ r := s.allocReg(mask, v)
+ outRegs[out.idx] = r
+ used |= regMask(1) << r
+ s.tmpused |= regMask(1) << r
+ }
+ // Record register choices
+ if v.Type.IsTuple() {
+ var outLocs LocPair
+ if r := outRegs[0]; r != noRegister {
+ outLocs[0] = &s.registers[r]
+ }
+ if r := outRegs[1]; r != noRegister {
+ outLocs[1] = &s.registers[r]
+ }
+ s.f.setHome(v, outLocs)
+ // Note that subsequent SelectX instructions will do the assignReg calls.
+ } else {
+ if r := outRegs[0]; r != noRegister {
+ s.assignReg(r, v, v)
+ }
+ }
+ }
+
+ // Deallocate dead args if we have not already done so.
+ if opcodeTable[v.Op].resultNotInArgs {
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+ s.tmpused = 0
+
+ // Issue the Value itself.
+ for i, a := range args {
+ v.SetArg(i, a) // use register version of arguments
+ }
+ b.Values = append(b.Values, v)
+
+ issueSpill:
+ }
+
+ // Copy the control values - we need this so we can reduce the
+ // uses property of these values later.
+ controls := append(make([]*Value, 0, 2), b.ControlValues()...)
+
+ // Load control values into registers.
+ for i, v := range b.ControlValues() {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing control %s\n", v.LongString())
+ }
+ // We assume that a control input can be passed in any
+ // type-compatible register. If this turns out not to be true,
+ // we'll need to introduce a regspec for a block's control value.
+ b.ReplaceControl(i, s.allocValToReg(v, s.compatRegs(v.Type), false, b.Pos))
+ }
+
+ // Reduce the uses of the control values once registers have been loaded.
+ // This loop is equivalent to the advanceUses method.
+ for _, v := range controls {
+ vi := &s.values[v.ID]
+ if !vi.needReg {
+ continue
+ }
+ // Remove this use from the uses list.
+ u := vi.uses
+ vi.uses = u.next
+ if u.next == nil {
+ s.freeRegs(vi.regs) // value is dead
+ }
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+
+ // If we are approaching a merge point and we are the primary
+ // predecessor of it, find live values that we use soon after
+ // the merge point and promote them to registers now.
+ if len(b.Succs) == 1 {
+ if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // Spill value in G register before any merge.
+ }
+ // For this to be worthwhile, the loop must have no calls in it.
+ top := b.Succs[0].b
+ loop := s.loopnest.b2l[top.ID]
+ if loop == nil || loop.header != top || loop.containsUnavoidableCall {
+ goto badloop
+ }
+
+ // TODO: sort by distance, pick the closest ones?
+ for _, live := range s.live[b.ID] {
+ if live.dist >= unlikelyDistance {
+ // Don't preload anything live after the loop.
+ continue
+ }
+ vid := live.ID
+ vi := &s.values[vid]
+ if vi.regs != 0 {
+ continue
+ }
+ if vi.rematerializeable {
+ continue
+ }
+ v := s.orig[vid]
+ m := s.compatRegs(v.Type) &^ s.used
+ // Use the desired register if available.
+ outerloop:
+ for _, e := range desired.entries {
+ if e.ID != v.ID {
+ continue
+ }
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ break outerloop
+ }
+ }
+ }
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ if m != 0 {
+ s.allocValToReg(v, m, false, b.Pos)
+ }
+ }
+ }
+ badloop:
+ ;
+
+ // Save end-of-block register state.
+ // First count how many; this cuts allocations in half.
+ k := 0
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ k++
+ }
+ regList := make([]endReg, 0, k)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ regList = append(regList, endReg{r, v, s.regs[r].c})
+ }
+ s.endRegs[b.ID] = regList
+
+ if checkEnabled {
+ regValLiveSet.clear()
+ for _, x := range s.live[b.ID] {
+ regValLiveSet.add(x.ID)
+ }
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if !regValLiveSet.contains(v.ID) {
+ s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
+ }
+ }
+ }
+
+ // If a value is live at the end of the block and
+ // isn't in a register, generate a use for the spill location.
+ // We need to remember this information so that
+ // the liveness analysis in stackalloc is correct.
+ for _, e := range s.live[b.ID] {
+ vi := &s.values[e.ID]
+ if vi.regs != 0 {
+ // in a register, we'll use that source for the merge.
+ continue
+ }
+ if vi.rematerializeable {
+ // we'll rematerialize during the merge.
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+ }
+ spill := s.makeSpill(s.orig[e.ID], b)
+ s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
+ }
+
+ // Clear any final uses.
+ // All that is left should be the pseudo-uses added for values which
+ // are live at the end of b.
+ for _, e := range s.live[b.ID] {
+ u := s.values[e.ID].uses
+ if u == nil {
+ f.Fatalf("live at end, no uses v%d", e.ID)
+ }
+ if u.next != nil {
+ f.Fatalf("live at end, too many uses v%d", e.ID)
+ }
+ s.values[e.ID].uses = nil
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+ }
+
+ // Decide where the spills we generated will go.
+ s.placeSpills()
+
+ // Anything that didn't get a register gets a stack location here.
+ // (StoreReg, stack-based phis, inputs, ...)
+ stacklive := stackalloc(s.f, s.spillLive)
+
+ // Fix up all merge edges.
+ s.shuffle(stacklive)
+
+ // Erase any copies we never used.
+ // Also, an unused copy might be the only use of another copy,
+ // so continue erasing until we reach a fixed point.
+ for {
+ progress := false
+ for c, used := range s.copies {
+ if !used && c.Uses == 0 {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("delete copied value %s\n", c.LongString())
+ }
+ c.RemoveArg(0)
+ f.freeValue(c)
+ delete(s.copies, c)
+ progress = true
+ }
+ }
+ if !progress {
+ break
+ }
+ }
+
+ for _, b := range s.visitOrder {
+ i := 0
+ for _, v := range b.Values {
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.Values = b.Values[:i]
+ }
+}
+
+func (s *regAllocState) placeSpills() {
+ f := s.f
+
+ // Precompute some useful info.
+ phiRegs := make([]regMask, f.NumBlocks())
+ for _, b := range s.visitOrder {
+ var m regMask
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ if r, ok := f.getHome(v.ID).(*Register); ok {
+ m |= regMask(1) << uint(r.num)
+ }
+ }
+ phiRegs[b.ID] = m
+ }
+
+ // Start maps block IDs to the list of spills
+ // that go at the start of the block (but after any phis).
+ start := map[ID][]*Value{}
+ // After maps value IDs to the list of spills
+ // that go immediately after that value ID.
+ after := map[ID][]*Value{}
+
+ for i := range s.values {
+ vi := s.values[i]
+ spill := vi.spill
+ if spill == nil {
+ continue
+ }
+ if spill.Block != nil {
+ // Some spills are already fully set up,
+ // like OpArgs and stack-based phis.
+ continue
+ }
+ v := s.orig[i]
+
+ // Walk down the dominator tree looking for a good place to
+ // put the spill of v. At the start "best" is the best place
+ // we have found so far.
+ // TODO: find a way to make this O(1) without arbitrary cutoffs.
+ best := v.Block
+ bestArg := v
+ var bestDepth int16
+ if l := s.loopnest.b2l[best.ID]; l != nil {
+ bestDepth = l.depth
+ }
+ b := best
+ const maxSpillSearch = 100
+ for i := 0; i < maxSpillSearch; i++ {
+ // Find the child of b in the dominator tree which
+ // dominates all restores.
+ p := b
+ b = nil
+ for c := s.sdom.Child(p); c != nil && i < maxSpillSearch; c, i = s.sdom.Sibling(c), i+1 {
+ if s.sdom[c.ID].entry <= vi.restoreMin && s.sdom[c.ID].exit >= vi.restoreMax {
+ // c also dominates all restores. Walk down into c.
+ b = c
+ break
+ }
+ }
+ if b == nil {
+ // Ran out of blocks which dominate all restores.
+ break
+ }
+
+ var depth int16
+ if l := s.loopnest.b2l[b.ID]; l != nil {
+ depth = l.depth
+ }
+ if depth > bestDepth {
+ // Don't push the spill into a deeper loop.
+ continue
+ }
+
+ // If v is in a register at the start of b, we can
+ // place the spill here (after the phis).
+ if len(b.Preds) == 1 {
+ for _, e := range s.endRegs[b.Preds[0].b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ } else {
+ for _, e := range s.startRegs[b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ }
+ }
+
+ // Put the spill in the best block we found.
+ spill.Block = best
+ spill.AddArg(bestArg)
+ if best == v.Block && v.Op != OpPhi {
+ // Place immediately after v.
+ after[v.ID] = append(after[v.ID], spill)
+ } else {
+ // Place at the start of best block.
+ start[best.ID] = append(start[best.ID], spill)
+ }
+ }
+
+ // Insert spill instructions into the block schedules.
+ var oldSched []*Value
+ for _, b := range s.visitOrder {
+ nphi := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ nphi++
+ }
+ oldSched = append(oldSched[:0], b.Values[nphi:]...)
+ b.Values = b.Values[:nphi]
+ b.Values = append(b.Values, start[b.ID]...)
+ for _, v := range oldSched {
+ b.Values = append(b.Values, v)
+ b.Values = append(b.Values, after[v.ID]...)
+ }
+ }
+}
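+
+// The net effect is that spills sink toward their restores as long as that
+// does not push them into a deeper loop. TestSpillMove1 and TestSpillMove2 in
+// regalloc_test.go (added later in this patch) illustrate this: when only one
+// exit of a loop needs the value from memory, the spill moves to that exit;
+// when every exit needs it, the spill stays at the definition in the loop
+// (see the TODO in TestSpillMove2 about duplicating spills instead).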
+
+// shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
+func (s *regAllocState) shuffle(stacklive [][]ID) {
+ var e edgeState
+ e.s = s
+ e.cache = map[ID][]*Value{}
+ e.contents = map[Location]contentRecord{}
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+
+ for _, b := range s.visitOrder {
+ if len(b.Preds) <= 1 {
+ continue
+ }
+ e.b = b
+ for i, edge := range b.Preds {
+ p := edge.b
+ e.p = p
+ e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
+ e.process()
+ }
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("post shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+}
+
+type edgeState struct {
+ s *regAllocState
+ p, b *Block // edge goes from p->b.
+
+ // for each pre-regalloc value, a list of equivalent cached values
+ cache map[ID][]*Value
+ cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
+
+ // map from location to the value it contains
+ contents map[Location]contentRecord
+
+ // desired destination locations
+ destinations []dstRecord
+ extra []dstRecord
+
+ usedRegs regMask // registers currently holding something
+ uniqueRegs regMask // registers holding the only copy of a value
+ finalRegs regMask // registers holding final target
+ rematerializeableRegs regMask // registers that hold rematerializeable values
+}
+
+type contentRecord struct {
+ vid ID // pre-regalloc value
+ c *Value // cached value
+ final bool // this is a satisfied destination
+ pos src.XPos // source position of use of the value
+}
+
+type dstRecord struct {
+ loc Location // register or stack slot
+ vid ID // pre-regalloc value it should contain
+ splice **Value // place to store reference to the generating instruction
+ pos src.XPos // source position of use of this location
+}
+
+// setup initializes the edge state for shuffling.
+func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("edge %s->%s\n", e.p, e.b)
+ }
+
+ // Clear state.
+ for _, vid := range e.cachedVals {
+ delete(e.cache, vid)
+ }
+ e.cachedVals = e.cachedVals[:0]
+ for k := range e.contents {
+ delete(e.contents, k)
+ }
+ e.usedRegs = 0
+ e.uniqueRegs = 0
+ e.finalRegs = 0
+ e.rematerializeableRegs = 0
+
+ // Live registers can be sources.
+ for _, x := range srcReg {
+ e.set(&e.s.registers[x.r], x.v.ID, x.c, false, src.NoXPos) // the source position doesn't matter here
+ }
+ // So can all of the spill locations.
+ for _, spillID := range stacklive {
+ v := e.s.orig[spillID]
+ spill := e.s.values[v.ID].spill
+ if !e.s.sdom.IsAncestorEq(spill.Block, e.p) {
+ // Spills were placed so that they dominate only the uses found
+ // during the first regalloc pass. The edge fixup code
+ // can't use a spill location if the spill doesn't dominate
+ // the edge.
+ // We are guaranteed that if the spill doesn't dominate this edge,
+ // then the value is available in a register (because we called
+ // makeSpill for every value not in a register at the start
+ // of an edge).
+ continue
+ }
+ e.set(e.s.f.getHome(spillID), v.ID, spill, false, src.NoXPos) // the source position doesn't matter here
+ }
+
+ // Figure out all the destinations we need.
+ dsts := e.destinations[:0]
+ for _, x := range dstReg {
+ dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.v.ID, nil, x.pos})
+ }
+ // Phis need their args to end up in a specific location.
+ for _, v := range e.b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ loc := e.s.f.getHome(v.ID)
+ if loc == nil {
+ continue
+ }
+ dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Pos})
+ }
+ e.destinations = dsts
+
+ if e.s.f.pass.debug > regDebug {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c)
+ }
+ }
+ for _, d := range e.destinations {
+ fmt.Printf("dst %s: v%d\n", d.loc, d.vid)
+ }
+ }
+}
+
+// process generates code to move all the values to the right destination locations.
+func (e *edgeState) process() {
+ dsts := e.destinations
+
+ // Process the destinations until they are all satisfied.
+ for len(dsts) > 0 {
+ i := 0
+ for _, d := range dsts {
+ if !e.processDest(d.loc, d.vid, d.splice, d.pos) {
+ // Failed - save for next iteration.
+ dsts[i] = d
+ i++
+ }
+ }
+ if i < len(dsts) {
+ // Made some progress. Go around again.
+ dsts = dsts[:i]
+
+ // Append any extra destinations we generated.
+ dsts = append(dsts, e.extra...)
+ e.extra = e.extra[:0]
+ continue
+ }
+
+ // We made no progress. That means that any
+ // remaining unsatisfied moves are in simple cycles.
+ // For example, A -> B -> C -> D -> A.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C
+
+ // To break the cycle, we pick an unused register, say R,
+ // and put a copy of B there.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C <---- R=copyofB
+ // When we resume the outer loop, the A->B move can now proceed,
+ // and eventually the whole cycle completes.
+
+ // Copy any cycle location to a temp register. This duplicates
+ // one of the cycle entries, allowing the just duplicated value
+ // to be overwritten and the cycle to proceed.
+ d := dsts[0]
+ loc := d.loc
+ vid := e.contents[loc].vid
+ c := e.contents[loc].c
+ r := e.findRegFor(c.Type)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c)
+ }
+ e.erase(r)
+ pos := d.pos.WithNotStmt()
+ if _, isReg := loc.(*Register); isReg {
+ c = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ c = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ }
+ e.set(r, vid, c, false, pos)
+ if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) {
+ e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString())
+ }
+ }
+}
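+
+// The cycle-breaking step above is the standard parallel-move trick: once every
+// remaining destination is also some other pending move's source, saving one
+// entry to a scratch location unblocks the rest of the chain. A minimal sketch
+// of the same idea on a plain int slice (hypothetical helper, illustration
+// only; the allocator itself works on Locations and cached Values):
+func breakMoveCycleSketch(vals []int) {
+ // Perform the cyclic move vals[0]<-vals[1]<-...<-vals[n-1]<-vals[0]
+ // by first copying vals[0] aside, like the OpCopy into R above.
+ if len(vals) < 2 {
+ return
+ }
+ tmp := vals[0]
+ copy(vals, vals[1:])    // every other move in the cycle can now proceed
+ vals[len(vals)-1] = tmp // the saved copy finally reaches its destination
+}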
+
+// processDest generates code to put value vid into location loc. Returns true
+// if progress was made.
+func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XPos) bool {
+ pos = pos.WithNotStmt()
+ occupant := e.contents[loc]
+ if occupant.vid == vid {
+ // Value is already in the correct place.
+ e.contents[loc] = contentRecord{vid, occupant.c, true, pos}
+ if splice != nil {
+ (*splice).Uses--
+ *splice = occupant.c
+ occupant.c.Uses++
+ }
+ // Note: if splice==nil then c will appear dead. This is
+ // non-SSA-form code, so be careful not to run deadcode
+ // elimination after this pass.
+ if _, ok := e.s.copies[occupant.c]; ok {
+ // The copy at occupant.c was used to avoid spill.
+ e.s.copies[occupant.c] = true
+ }
+ return true
+ }
+
+ // Check if we're allowed to clobber the destination location.
+ if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
+ // We can't overwrite the last copy
+ // of a value that needs to survive.
+ return false
+ }
+
+ // Copy from a source of v, register preferred.
+ v := e.s.orig[vid]
+ var c *Value
+ var src Location
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("moving v%d to %s\n", vid, loc)
+ fmt.Printf("sources of v%d:", vid)
+ }
+ for _, w := range e.cache[vid] {
+ h := e.s.f.getHome(w.ID)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" %s:%s", h, w)
+ }
+ _, isreg := h.(*Register)
+ if src == nil || isreg {
+ c = w
+ src = h
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ if src != nil {
+ fmt.Printf(" [use %s]\n", src)
+ } else {
+ fmt.Printf(" [no source]\n")
+ }
+ }
+ _, dstReg := loc.(*Register)
+
+ // Pre-clobber destination. This avoids the
+ // following situation:
+ // - v is currently held in R0 and stacktmp0.
+ // - We want to copy stacktmp1 to stacktmp0.
+ // - We choose R0 as the temporary register.
+ // During the copy, both R0 and stacktmp0 are
+ // clobbered, losing both copies of v. Oops!
+ // Erasing the destination early means R0 will not
+ // be chosen as the temp register, as it will then
+ // be the last copy of v.
+ e.erase(loc)
+ var x *Value
+ if c == nil || e.s.values[vid].rematerializeable {
+ if !e.s.values[vid].rematerializeable {
+ e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
+ }
+ if dstReg {
+ x = v.copyInto(e.p)
+ } else {
+ // Rematerialize into stack slot. Need a free
+ // register to accomplish this.
+ r := e.findRegFor(v.Type)
+ e.erase(r)
+ x = v.copyIntoWithXPos(e.p, pos)
+ e.set(r, vid, x, false, pos)
+ // Make sure we spill with the size of the slot, not the
+ // size of x (which might be wider due to our dropping
+ // of narrowing conversions).
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, x)
+ }
+ } else {
+ // Emit move from src to dst.
+ _, srcReg := src.(*Register)
+ if srcReg {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, c)
+ }
+ } else {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ } else {
+ // mem->mem. Use temp register.
+ r := e.findRegFor(c.Type)
+ e.erase(r)
+ t := e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ e.set(r, vid, t, false, pos)
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t)
+ }
+ }
+ }
+ e.set(loc, vid, x, true, pos)
+ if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
+ e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
+ }
+ if splice != nil {
+ (*splice).Uses--
+ *splice = x
+ x.Uses++
+ }
+ return true
+}
+
+// set changes the contents of location loc to hold the given value and its cached representative.
+func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) {
+ e.s.f.setHome(c, loc)
+ e.contents[loc] = contentRecord{vid, c, final, pos}
+ a := e.cache[vid]
+ if len(a) == 0 {
+ e.cachedVals = append(e.cachedVals, vid)
+ }
+ a = append(a, c)
+ e.cache[vid] = a
+ if r, ok := loc.(*Register); ok {
+ if e.usedRegs&(regMask(1)<<uint(r.num)) != 0 {
+ e.s.f.Fatalf("%v is already set (v%d/%v)", r, vid, c)
+ }
+ e.usedRegs |= regMask(1) << uint(r.num)
+ if final {
+ e.finalRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 2 {
+ if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs &^= regMask(1) << uint(t.num)
+ }
+ }
+ if e.s.values[vid].rematerializeable {
+ e.rematerializeableRegs |= regMask(1) << uint(r.num)
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("%s\n", c.LongString())
+ fmt.Printf("v%d now available in %s:%s\n", vid, loc, c)
+ }
+}
+
+// erase removes the value (if any) currently cached in location loc.
+func (e *edgeState) erase(loc Location) {
+ cr := e.contents[loc]
+ if cr.c == nil {
+ return
+ }
+ vid := cr.vid
+
+ if cr.final {
+ // Add a destination to move this value back into place.
+ // Make sure it gets added to the tail of the destination queue
+ // so we make progress on other moves first.
+ e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.pos})
+ }
+
+ // Remove c from the list of cached values.
+ a := e.cache[vid]
+ for i, c := range a {
+ if e.s.f.getHome(c.ID) == loc {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c)
+ }
+ a[i], a = a[len(a)-1], a[:len(a)-1]
+ break
+ }
+ }
+ e.cache[vid] = a
+
+ // Update register masks.
+ if r, ok := loc.(*Register); ok {
+ e.usedRegs &^= regMask(1) << uint(r.num)
+ if cr.final {
+ e.finalRegs &^= regMask(1) << uint(r.num)
+ }
+ e.rematerializeableRegs &^= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ }
+}
+
+// findRegFor finds a register we can use to make a temp copy of type typ.
+func (e *edgeState) findRegFor(typ *types.Type) Location {
+ // Which registers are possibilities.
+ types := &e.s.f.Config.Types
+ m := e.s.compatRegs(typ)
+
+ // Pick a register. In priority order:
+ // 1) an unused register
+ // 2) a non-unique register not holding a final value
+ // 3) a non-unique register
+ // 4) a register holding a rematerializeable value
+ x := m &^ e.usedRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs &^ e.finalRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m & e.rematerializeableRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+
+ // No register is available.
+ // Pick a register to spill.
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 {
+ if !c.rematerializeable() {
+ x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c)
+ // Allocate a temp location to spill a register to.
+ // The type of the slot is immaterial - it will not be live across
+ // any safepoint. Just use a type big enough to hold any register.
+ t := LocalSlot{N: e.s.f.fe.Auto(c.Pos, types.Int64), Type: types.Int64}
+ // TODO: reuse these slots. They'll need to be erased first.
+ e.set(t, vid, x, false, c.Pos)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString())
+ }
+ }
+ // r will now be overwritten by the caller. At some point
+ // later, the newly saved value will be moved back to its
+ // final destination in processDest.
+ return r
+ }
+ }
+ }
+
+ fmt.Printf("m:%d unique:%d final:%d rematerializable:%d\n", m, e.uniqueRegs, e.finalRegs, e.rematerializeableRegs)
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID))
+ }
+ }
+ e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
+ return nil
+}
+
+// rematerializeable reports whether the register allocator should recompute
+// a value instead of spilling/restoring it.
+func (v *Value) rematerializeable() bool {
+ if !opcodeTable[v.Op].rematerializeable {
+ return false
+ }
+ for _, a := range v.Args {
+ // SP and SB (generated by OpSP and OpSB) are always available.
+ if a.Op != OpSP && a.Op != OpSB {
+ return false
+ }
+ }
+ return true
+}
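+
+// In practice this covers constants and address computations whose only inputs
+// are SP or SB (for example the stack-object LEAQ mentioned in the OpKeepAlive
+// handling above): re-executing such a value at each use is cheaper than
+// keeping a register or spill slot live for it.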
+
+type liveInfo struct {
+ ID ID // ID of value
+ dist int32 // # of instructions before next use
+ pos src.XPos // source position of next use
+}
+
+// computeLive computes a map from block ID to a list of value IDs live at the end
+// of that block. Together with the value ID is a count of how many instructions
+// to the next use of that value. The resulting map is stored in s.live.
+// computeLive also computes the desired register information at the end of each block.
+// This desired register information is stored in s.desired.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *regAllocState) computeLive() {
+ f := s.f
+ s.live = make([][]liveInfo, f.NumBlocks())
+ s.desired = make([]desiredState, f.NumBlocks())
+ var phis []*Value
+
+ live := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(live)
+ t := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(t)
+
+ // Keep track of which value we want in each register.
+ var desired desiredState
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ // TODO: Do a better job yet. Here's one possibility:
+ // Calculate the dominator tree and locate all strongly connected components.
+ // If a value is live in one block of an SCC, it is live in all.
+ // Walk the dominator tree from end to beginning, just once, treating SCC
+ // components as single blocks, duplicating calculated liveness information
+ // out to all of them.
+ po := f.postorder()
+ s.loopnest = f.loopnest()
+ s.loopnest.calculateDepths()
+ for {
+ changed := false
+
+ for _, b := range po {
+ // Start with known live values at the end of the block.
+ // Add len(b.Values) to adjust from end-of-block distance
+ // to beginning-of-block distance.
+ live.clear()
+ for _, e := range s.live[b.ID] {
+ live.set(e.ID, e.dist+int32(len(b.Values)), e.pos)
+ }
+
+ // Mark control values as live
+ for _, c := range b.ControlValues() {
+ if s.values[c.ID].needReg {
+ live.set(c.ID, int32(len(b.Values)), b.Pos)
+ }
+ }
+
+ // Propagate backwards to the start of the block
+ // Assumes Values have been scheduled.
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // save phi ops for later
+ phis = append(phis, v)
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ c := live.contents()
+ for i := range c {
+ c[i].val += unlikelyDistance
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needReg {
+ live.set(a.ID, int32(i), v.Pos)
+ }
+ }
+ }
+ // Propagate desired registers backwards.
+ desired.copy(&s.desired[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ prefs := desired.remove(v.ID)
+ if v.Op == OpPhi {
+ // TODO: if v is a phi, save desired register for phi inputs.
+ // For now, we just drop it and don't propagate
+ // desired registers back through phi nodes.
+ continue
+ }
+ regspec := s.regspec(v.Op)
+ // Cancel desired registers if they get clobbered.
+ desired.clobber(regspec.clobbers)
+ // Update desired registers if there are any fixed register inputs.
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ // Set desired register of input 0 if this is a 2-operand instruction.
+ if opcodeTable[v.Op].resultInArg0 {
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ }
+
+ // For each predecessor of b, expand its list of live-at-end values.
+ // invariant: live contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ // Compute additional distance for the edge.
+ // Note: delta must be at least 1 to distinguish the control
+ // value use from the first user in a successor block.
+ delta := int32(normalDistance)
+ if len(p.Succs) == 2 {
+ if p.Succs[0].b == b && p.Likely == BranchLikely ||
+ p.Succs[1].b == b && p.Likely == BranchUnlikely {
+ delta = likelyDistance
+ }
+ if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
+ p.Succs[1].b == b && p.Likely == BranchLikely {
+ delta = unlikelyDistance
+ }
+ }
+
+ // Update any desired registers at the end of p.
+ s.desired[p.ID].merge(&desired)
+
+ // Start t off with the previously known live values at the end of p.
+ t.clear()
+ for _, e := range s.live[p.ID] {
+ t.set(e.ID, e.dist, e.pos)
+ }
+ update := false
+
+ // Add new live values from scanning this block.
+ for _, e := range live.contents() {
+ d := e.val + delta
+ if !t.contains(e.key) || d < t.get(e.key) {
+ update = true
+ t.set(e.key, d, e.aux)
+ }
+ }
+ // Also add the correct arg from the saved phi values.
+ // All phis are at distance delta (we consider them
+ // simultaneously happening at the start of the block).
+ for _, v := range phis {
+ id := v.Args[i].ID
+ if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
+ update = true
+ t.set(id, delta, v.Pos)
+ }
+ }
+
+ if !update {
+ continue
+ }
+ // The live set has changed, update it.
+ l := s.live[p.ID][:0]
+ if cap(l) < t.size() {
+ l = make([]liveInfo, 0, t.size())
+ }
+ for _, e := range t.contents() {
+ l = append(l, liveInfo{e.key, e.val, e.aux})
+ }
+ s.live[p.ID] = l
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > regDebug {
+ fmt.Println("live values at end of each block")
+ for _, b := range f.Blocks {
+ fmt.Printf(" %s:", b)
+ for _, x := range s.live[b.ID] {
+ fmt.Printf(" v%d(%d)", x.ID, x.dist)
+ for _, e := range s.desired[b.ID].entries {
+ if e.ID != x.ID {
+ continue
+ }
+ fmt.Printf("[")
+ first := true
+ for _, r := range e.regs {
+ if r == noRegister {
+ continue
+ }
+ if !first {
+ fmt.Printf(",")
+ }
+ fmt.Print(&s.registers[r])
+ first = false
+ }
+ fmt.Printf("]")
+ }
+ }
+ if avoid := s.desired[b.ID].avoid; avoid != 0 {
+ fmt.Printf(" avoid=%v", s.RegMaskString(avoid))
+ }
+ fmt.Println()
+ }
+ }
+}
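+
+// Note that the distance weighting above (a per-edge delta, plus unlikelyDistance
+// across calls and unlikely branches) is what makes a value whose next use is
+// far away look unattractive to keep in a register; for example, the loop
+// preloading code earlier in this file skips anything at distance >= unlikelyDistance.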
+
+// A desiredState represents desired register assignments.
+type desiredState struct {
+ // Desired assignments will be small, so we just use a list
+ // of valueID+registers entries.
+ entries []desiredStateEntry
+ // Registers that other values want to be in. This value will
+ // contain at least the union of the regs fields of entries, but
+ // may contain additional registers for values that were once in
+ // this data structure but are no longer.
+ avoid regMask
+}
+type desiredStateEntry struct {
+ // (pre-regalloc) value
+ ID ID
+ // Registers it would like to be in, in priority order.
+ // Unused slots are filled with noRegister.
+ regs [4]register
+}
+
+func (d *desiredState) clear() {
+ d.entries = d.entries[:0]
+ d.avoid = 0
+}
+
+// get returns a list of desired registers for value vid.
+func (d *desiredState) get(vid ID) [4]register {
+ for _, e := range d.entries {
+ if e.ID == vid {
+ return e.regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// add records that we'd like value vid to be in register r.
+func (d *desiredState) add(vid ID, r register) {
+ d.avoid |= regMask(1) << r
+ for i := range d.entries {
+ e := &d.entries[i]
+ if e.ID != vid {
+ continue
+ }
+ if e.regs[0] == r {
+ // Already known and highest priority
+ return
+ }
+ for j := 1; j < len(e.regs); j++ {
+ if e.regs[j] == r {
+ // Move from lower priority to top priority
+ copy(e.regs[1:], e.regs[:j])
+ e.regs[0] = r
+ return
+ }
+ }
+ copy(e.regs[1:], e.regs[:])
+ e.regs[0] = r
+ return
+ }
+ d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
+}
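+
+// A quick sketch of the priority behavior above (hypothetical helper, for
+// illustration only): the most recently added register moves to the front of
+// the four-entry list, and re-adding a register already present promotes it
+// instead of duplicating it.
+func desiredAddSketch() [4]register {
+ var d desiredState
+ d.add(7, 1) // value 7 would like register 1: regs = [1, -, -, -]
+ d.add(7, 2) // a later request takes priority: regs = [2, 1, -, -]
+ d.add(7, 1) // re-adding 1 promotes it again:  regs = [1, 2, -, -]
+ return d.get(7)
+}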
+
+func (d *desiredState) addList(vid ID, regs [4]register) {
+ // regs is in priority order, so iterate in reverse order.
+ for i := len(regs) - 1; i >= 0; i-- {
+ r := regs[i]
+ if r != noRegister {
+ d.add(vid, r)
+ }
+ }
+}
+
+// clobber erases any desired registers in the set m.
+func (d *desiredState) clobber(m regMask) {
+ for i := 0; i < len(d.entries); {
+ e := &d.entries[i]
+ j := 0
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 == 0 {
+ e.regs[j] = r
+ j++
+ }
+ }
+ if j == 0 {
+ // No more desired registers for this value.
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ continue
+ }
+ for ; j < len(e.regs); j++ {
+ e.regs[j] = noRegister
+ }
+ i++
+ }
+ d.avoid &^= m
+}
+
+// copy copies a desired state from another desiredState x.
+func (d *desiredState) copy(x *desiredState) {
+ d.entries = append(d.entries[:0], x.entries...)
+ d.avoid = x.avoid
+}
+
+// remove removes the desired registers for vid and returns them.
+func (d *desiredState) remove(vid ID) [4]register {
+ for i := range d.entries {
+ if d.entries[i].ID == vid {
+ regs := d.entries[i].regs
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ return regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// merge merges another desired state x into d.
+func (d *desiredState) merge(x *desiredState) {
+ d.avoid |= x.avoid
+ // There should only be a few desired registers, so
+ // linear insert is ok.
+ for _, e := range x.entries {
+ d.addList(e.ID, e.regs)
+ }
+}
+
+func min32(x, y int32) int32 {
+ if x < y {
+ return x
+ }
+ return y
+}
+func max32(x, y int32) int32 {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
new file mode 100644
index 0000000..d990cac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -0,0 +1,230 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "testing"
+)
+
+func TestLiveControlOps(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil),
+ Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil),
+ Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"),
+ Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"),
+ Eq("a", "if", "exit"),
+ ),
+ Bloc("if",
+ Eq("b", "plain", "exit"),
+ ),
+ Bloc("plain",
+ Goto("exit"),
+ ),
+ Bloc("exit",
+ Exit("mem"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+}
+
+// Test to make sure G register is never reloaded from spill (spill of G is okay)
+// See #25504
+func TestNoGetgLoadReg(t *testing.T) {
+ /*
+ Original:
+ func fff3(i int) *g {
+ gee := getg()
+ if i == 0 {
+ fff()
+ }
+ return gee // here
+ }
+ */
+ c := testConfigARM64(t)
+ f := c.Fun("b1",
+ Bloc("b1",
+ Valu("v1", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v6", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
+ Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
+ Eq("v11", "b2", "b4"),
+ ),
+ Bloc("b4",
+ Goto("b3"),
+ ),
+ Bloc("b3",
+ Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"),
+ Exit("v16"),
+ ),
+ Bloc("b2",
+ Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"),
+ Goto("b3"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway.
+ r := f.f.RegAlloc
+ for _, b := range f.blocks {
+ for _, v := range b.Values {
+ if v.Op == OpLoadReg && r[v.ID].String() == "g" {
+ t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString())
+ }
+ }
+ }
+}
+
+// Test to make sure we don't push spills into loops.
+// See issue #19595.
+func TestSpillWithLoop(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Bool)),
+ Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
+ Goto("loop"),
+ ),
+ Bloc("loop",
+ Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"),
+ Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"),
+ Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"),
+ Eq("test", "next", "exit"),
+ ),
+ Bloc("next",
+ Goto("loop"),
+ ),
+ Bloc("exit",
+ Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"),
+ Exit("store"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ for _, v := range f.blocks["loop"].Values {
+ if v.Op == OpStoreReg {
+ t.Errorf("spill inside loop %s", v.LongString())
+ }
+ }
+}
+
+func TestSpillMove1(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store before call, y is available in a register
+ Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"),
+ Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Spill should be moved to exit2.
+ if numSpills(f.blocks["loop1"]) != 0 {
+ t.Errorf("spill present from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 1 {
+ t.Errorf("spill missing in exit2")
+ }
+
+}
+
+func TestSpillMove2(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store after call, y must be loaded from a spill location
+ Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // There should be a spill in loop1, and nowhere else.
+ // TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2.
+ if numSpills(f.blocks["loop1"]) != 1 {
+ t.Errorf("spill missing from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 0 {
+ t.Errorf("spill present in exit2")
+ }
+
+}
+
+func numSpills(b *Block) int {
+ n := 0
+ for _, v := range b.Values {
+ if v.Op == OpStoreReg {
+ n++
+ }
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
new file mode 100644
index 0000000..9e5ef68
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -0,0 +1,1892 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+ "os"
+ "path/filepath"
+)
+
+type deadValueChoice bool
+
+const (
+ leaveDeadValues deadValueChoice = false
+ removeDeadValues = true
+)
+
+// applyRewrite repeatedly applies the block rewriter rb and the value rewriter rv
+// until neither fires; deadcode indicates whether it should also try to remove
+// values that become dead along the way.
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) {
+ // repeat rewrites until we find no more rewrites
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+ debug := f.pass.debug
+ if debug > 1 {
+ fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name)
+ }
+ for {
+ change := false
+ for _, b := range f.Blocks {
+ var b0 *Block
+ if debug > 1 {
+ b0 = new(Block)
+ *b0 = *b
+ b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing
+ }
+ for i, c := range b.ControlValues() {
+ for c.Op == OpCopy {
+ c = c.Args[0]
+ b.ReplaceControl(i, c)
+ }
+ }
+ if rb(b) {
+ change = true
+ if debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", b0.LongString(), b.LongString())
+ }
+ }
+ for j, v := range b.Values {
+ var v0 *Value
+ if debug > 1 {
+ v0 = new(Value)
+ *v0 = *v
+ v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing
+ }
+ if v.Uses == 0 && v.removeable() {
+ if v.Op != OpInvalid && deadcode == removeDeadValues {
+ // Reset any values that are now unused, so that we decrement
+ // the use counts of their arguments.
+ // Not quite a deadcode pass, because it does not handle cycles.
+ // But it should help Uses==1 rules to fire.
+ v.reset(OpInvalid)
+ change = true
+ }
+ // No point rewriting values which aren't used.
+ continue
+ }
+
+ vchange := phielimValue(v)
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // Eliminate copy inputs.
+ // If any copy input becomes unused, mark it
+ // as invalid and discard its argument. Repeat
+ // recursively on the discarded argument.
+ // This phase helps remove phantom "dead copy" uses
+ // of a value so that an x.Uses==1 rule condition
+ // fires reliably.
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ // If a, a copy, has a line boundary indicator, attempt to find a new value
+ // to hold it. The first candidate is the value that will replace a (aa),
+ // if it shares the same block and line and is eligible.
+ // The second option is v, which has a as an input. Because aa is earlier in
+ // the data flow, it is the better choice.
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt {
+ aa.Pos = aa.Pos.WithIsStmt()
+ } else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt {
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ // Record the lost line and look for a new home after all rewrites are complete.
+ // TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same
+ // line to appear in more than one block, but only one block is stored, so if both end
+ // up here, then one will be lost.
+ pendingLines.set(a.Pos, int32(a.Block.ID))
+ }
+ a.Pos = a.Pos.WithNotStmt()
+ }
+ vchange = true
+ for a.Uses == 0 {
+ b := a.Args[0]
+ a.reset(OpInvalid)
+ a = b
+ }
+ }
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // apply rewrite function
+ if rv(v) {
+ vchange = true
+ // If value changed to a poor choice for a statement boundary, move the boundary
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ if k := nextGoodStatementIndex(v, j, b); k != j {
+ v.Pos = v.Pos.WithNotStmt()
+ b.Values[k].Pos = b.Values[k].Pos.WithIsStmt()
+ }
+ }
+ }
+
+ change = change || vchange
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+ }
+ }
+ if !change {
+ break
+ }
+ }
+ // remove clobbered values
+ for _, b := range f.Blocks {
+ j := 0
+ for i, v := range b.Values {
+ vl := v.Pos
+ if v.Op == OpInvalid {
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.set(vl, int32(b.ID))
+ }
+ f.freeValue(v)
+ continue
+ }
+ if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.get(vl) == int32(b.ID) {
+ pendingLines.remove(vl)
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ if i != j {
+ b.Values[j] = v
+ }
+ j++
+ }
+ if pendingLines.get(b.Pos) == int32(b.ID) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+ b.truncateValues(j)
+ }
+}
+
+// Common functions called from rewriting rules
+
+func is64BitFloat(t *types.Type) bool {
+ return t.Size() == 8 && t.IsFloat()
+}
+
+func is32BitFloat(t *types.Type) bool {
+ return t.Size() == 4 && t.IsFloat()
+}
+
+func is64BitInt(t *types.Type) bool {
+ return t.Size() == 8 && t.IsInteger()
+}
+
+func is32BitInt(t *types.Type) bool {
+ return t.Size() == 4 && t.IsInteger()
+}
+
+func is16BitInt(t *types.Type) bool {
+ return t.Size() == 2 && t.IsInteger()
+}
+
+func is8BitInt(t *types.Type) bool {
+ return t.Size() == 1 && t.IsInteger()
+}
+
+func isPtr(t *types.Type) bool {
+ return t.IsPtrShaped()
+}
+
+func isSigned(t *types.Type) bool {
+ return t.IsSigned()
+}
+
+// mergeSym merges two symbolic offsets. There is no real merging of
+// offsets, we just pick the non-nil one.
+func mergeSym(x, y Sym) Sym {
+ if x == nil {
+ return y
+ }
+ if y == nil {
+ return x
+ }
+ panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
+}
+
+func canMergeSym(x, y Sym) bool {
+ return x == nil || y == nil
+}
+
+// canMergeLoadClobber reports whether the load can be merged into target without
+// invalidating the schedule.
+// It also checks that the other non-load argument x is something we
+// are ok with clobbering.
+func canMergeLoadClobber(target, load, x *Value) bool {
+ // The register containing x is going to get clobbered.
+ // Don't merge if we still need the value of x.
+ // We don't have liveness information here, but we can
+ // approximate x dying with:
+ // 1) target is x's only use.
+ // 2) target is not in a deeper loop than x.
+ if x.Uses != 1 {
+ return false
+ }
+ loopnest := x.Block.Func.loopnest()
+ loopnest.calculateDepths()
+ if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
+ return false
+ }
+ return canMergeLoad(target, load)
+}
+
+// canMergeLoad reports whether the load can be merged into target without
+// invalidating the schedule.
+func canMergeLoad(target, load *Value) bool {
+ if target.Block.ID != load.Block.ID {
+ // If the load is in a different block do not merge it.
+ return false
+ }
+
+ // We can't merge the load into the target if the load
+ // has more than one use.
+ if load.Uses != 1 {
+ return false
+ }
+
+ mem := load.MemoryArg()
+
+ // We need the load's memory arg to still be alive at target. That
+ // can't be the case if one of target's args depends on a memory
+ // state that is a successor of load's memory arg.
+ //
+ // For example, it would be invalid to merge load into target in
+ // the following situation because newmem has killed oldmem
+ // before target is reached:
+ // load = read ... oldmem
+ // newmem = write ... oldmem
+ // arg0 = read ... newmem
+ // target = add arg0 load
+ //
+ // If the argument comes from a different block then we can exclude
+ // it immediately because it must dominate load (which is in the
+ // same block as target).
+ var args []*Value
+ for _, a := range target.Args {
+ if a != load && a.Block.ID == target.Block.ID {
+ args = append(args, a)
+ }
+ }
+
+ // memPreds contains memory states known to be predecessors of load's
+ // memory state. It is lazily initialized.
+ var memPreds map[*Value]bool
+ for i := 0; len(args) > 0; i++ {
+ const limit = 100
+ if i >= limit {
+ // Give up if we have done a lot of iterations.
+ return false
+ }
+ v := args[len(args)-1]
+ args = args[:len(args)-1]
+ if target.Block.ID != v.Block.ID {
+ // Since target and load are in the same block
+ // we can stop searching when we leave the block.
+ continue
+ }
+ if v.Op == OpPhi {
+ // A Phi implies we have reached the top of the block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ continue
+ }
+ if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ // We could handle this situation however it is likely
+ // to be very rare.
+ return false
+ }
+ if v.Op.SymEffect()&SymAddr != 0 {
+ // This case prevents an operation that calculates the
+ // address of a local variable from being forced to schedule
+ // before its corresponding VarDef.
+ // See issue 28445.
+ // v1 = LOAD ...
+ // v2 = VARDEF
+ // v3 = LEAQ
+ // v4 = CMPQ v1 v3
+ // We don't want to combine the CMPQ with the load, because
+ // that would force the CMPQ to schedule before the VARDEF, which
+ // in turn requires the LEAQ to schedule before the VARDEF.
+ return false
+ }
+ if v.Type.IsMemory() {
+ if memPreds == nil {
+ // Initialise a map containing memory states
+ // known to be predecessors of load's memory
+ // state.
+ memPreds = make(map[*Value]bool)
+ m := mem
+ const limit = 50
+ for i := 0; i < limit; i++ {
+ if m.Op == OpPhi {
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ break
+ }
+ if m.Block.ID != target.Block.ID {
+ break
+ }
+ if !m.Type.IsMemory() {
+ break
+ }
+ memPreds[m] = true
+ if len(m.Args) == 0 {
+ break
+ }
+ m = m.MemoryArg()
+ }
+ }
+
+ // We can merge if v is a predecessor of mem.
+ //
+ // For example, we can merge load into target in the
+ // following scenario:
+ // x = read ... v
+ // mem = write ... v
+ // load = read ... mem
+ // target = add x load
+ if memPreds[v] {
+ continue
+ }
+ return false
+ }
+ if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
+ // If v takes mem as an input then we know mem
+ // is valid at this point.
+ continue
+ }
+ for _, a := range v.Args {
+ if target.Block.ID == a.Block.ID {
+ args = append(args, a)
+ }
+ }
+ }
+
+ return true
+}
+
+// isSameCall reports whether sym is the same as the given named symbol
+func isSameCall(sym interface{}, name string) bool {
+ fn := sym.(*AuxCall).Fn
+ return fn != nil && fn.String() == name
+}
+
+// nlzX returns the number of leading zeros.
+func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
+func nlz32(x int32) int { return bits.LeadingZeros32(uint32(x)) }
+func nlz16(x int16) int { return bits.LeadingZeros16(uint16(x)) }
+func nlz8(x int8) int { return bits.LeadingZeros8(uint8(x)) }
+
+// ntzX returns the number of trailing zeros.
+func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) }
+func ntz32(x int32) int { return bits.TrailingZeros32(uint32(x)) }
+func ntz16(x int16) int { return bits.TrailingZeros16(uint16(x)) }
+func ntz8(x int8) int { return bits.TrailingZeros8(uint8(x)) }
+
+func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit8(x int8) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit16(x int16) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit32(x int32) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit64(x int64) bool { return x&(x-1) == 0 && x != 0 }
+
+// nto returns the number of trailing ones.
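+// For example, nto(0b0111) == 3 and nto(-1) == 64.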
+func nto(x int64) int64 {
+ return int64(ntz64(^x))
+}
+
+// logX returns logarithm of n base 2.
+// n must be a positive power of 2 (isPowerOfTwoX returns true).
+func log8(n int8) int64 {
+ return int64(bits.Len8(uint8(n))) - 1
+}
+func log16(n int16) int64 {
+ return int64(bits.Len16(uint16(n))) - 1
+}
+func log32(n int32) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+func log64(n int64) int64 {
+ return int64(bits.Len64(uint64(n))) - 1
+}
+
+// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
+// Rounds down.
+func log2uint32(n int64) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+
+// isPowerOfTwo functions report whether n is a power of 2.
+func isPowerOfTwo8(n int8) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo16(n int16) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo32(n int32) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo64(n int64) bool {
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
+func isUint64PowerOfTwo(in int64) bool {
+ n := uint64(in)
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
+func isUint32PowerOfTwo(in int64) bool {
+ n := uint64(uint32(in))
+ return n > 0 && n&(n-1) == 0
+}
+
+// is32Bit reports whether n can be represented as a signed 32 bit integer.
+func is32Bit(n int64) bool {
+ return n == int64(int32(n))
+}
+
+// is16Bit reports whether n can be represented as a signed 16 bit integer.
+func is16Bit(n int64) bool {
+ return n == int64(int16(n))
+}
+
+// is8Bit reports whether n can be represented as a signed 8 bit integer.
+func is8Bit(n int64) bool {
+ return n == int64(int8(n))
+}
+
+// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
+func isU8Bit(n int64) bool {
+ return n == int64(uint8(n))
+}
+
+// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
+func isU12Bit(n int64) bool {
+ return 0 <= n && n < (1<<12)
+}
+
+// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
+func isU16Bit(n int64) bool {
+ return n == int64(uint16(n))
+}
+
+// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
+func isU32Bit(n int64) bool {
+ return n == int64(uint32(n))
+}
+
+// is20Bit reports whether n can be represented as a signed 20 bit integer.
+func is20Bit(n int64) bool {
+ return -(1<<19) <= n && n < (1<<19)
+}
+
+// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
+func b2i(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// b2i32 translates a boolean value to 0 or 1.
+func b2i32(b bool) int32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
+// A shift is bounded if it is shifting by less than the width of the shifted value.
+func shiftIsBounded(v *Value) bool {
+ return v.AuxInt != 0
+}
+
+// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
+// of the mantissa. It will panic if the truncation results in lost information.
+func truncate64Fto32F(f float64) float32 {
+ if !isExactFloat32(f) {
+ panic("truncate64Fto32F: truncation is not exact")
+ }
+ if !math.IsNaN(f) {
+ return float32(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := math.Float64bits(f)
+ m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand)
+ // | sign | exponent | mantissa |
+ r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23)))
+ return math.Float32frombits(r)
+}
+
+// extend32Fto64F converts a float32 value to a float64 value preserving the bit
+// pattern of the mantissa.
+func extend32Fto64F(f float32) float64 {
+ if !math.IsNaN(float64(f)) {
+ return float64(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := uint64(math.Float32bits(f))
+ // | sign | exponent | mantissa |
+ r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
+ return math.Float64frombits(r)
+}
+
+// DivisionNeedsFixUp reports whether the division needs fix-up code.
+func DivisionNeedsFixUp(v *Value) bool {
+ return v.AuxInt == 0
+}
+
+// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
+func auxFrom64F(f float64) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(f))
+}
+
+// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
+func auxFrom32F(f float32) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(extend32Fto64F(f)))
+}
+
+// auxTo32F decodes a float32 from the AuxInt value provided.
+func auxTo32F(i int64) float32 {
+ return truncate64Fto32F(math.Float64frombits(uint64(i)))
+}
+
+// auxTo64F decodes a float64 from the AuxInt value provided.
+func auxTo64F(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+
+func auxIntToBool(i int64) bool {
+ if i == 0 {
+ return false
+ }
+ return true
+}
+func auxIntToInt8(i int64) int8 {
+ return int8(i)
+}
+func auxIntToInt16(i int64) int16 {
+ return int16(i)
+}
+func auxIntToInt32(i int64) int32 {
+ return int32(i)
+}
+func auxIntToInt64(i int64) int64 {
+ return i
+}
+func auxIntToUint8(i int64) uint8 {
+ return uint8(i)
+}
+func auxIntToFloat32(i int64) float32 {
+ return float32(math.Float64frombits(uint64(i)))
+}
+func auxIntToFloat64(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+func auxIntToValAndOff(i int64) ValAndOff {
+ return ValAndOff(i)
+}
+func auxIntToArm64BitField(i int64) arm64BitField {
+ return arm64BitField(i)
+}
+func auxIntToInt128(x int64) int128 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func auxIntToFlagConstant(x int64) flagConstant {
+ return flagConstant(x)
+}
+
+func auxIntToOp(cc int64) Op {
+ return Op(cc)
+}
+
+func boolToAuxInt(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+func int8ToAuxInt(i int8) int64 {
+ return int64(i)
+}
+func int16ToAuxInt(i int16) int64 {
+ return int64(i)
+}
+func int32ToAuxInt(i int32) int64 {
+ return int64(i)
+}
+func int64ToAuxInt(i int64) int64 {
+ return int64(i)
+}
+func uint8ToAuxInt(i uint8) int64 {
+ return int64(int8(i))
+}
+func float32ToAuxInt(f float32) int64 {
+ return int64(math.Float64bits(float64(f)))
+}
+func float64ToAuxInt(f float64) int64 {
+ return int64(math.Float64bits(f))
+}
+func valAndOffToAuxInt(v ValAndOff) int64 {
+ return int64(v)
+}
+func arm64BitFieldToAuxInt(v arm64BitField) int64 {
+ return int64(v)
+}
+func int128ToAuxInt(x int128) int64 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func flagConstantToAuxInt(x flagConstant) int64 {
+ return int64(x)
+}
+
+func opToAuxInt(o Op) int64 {
+ return int64(o)
+}
+
+func auxToString(i interface{}) string {
+ return i.(string)
+}
+func auxToSym(i interface{}) Sym {
+ // TODO: kind of a hack - allows nil interface through
+ s, _ := i.(Sym)
+ return s
+}
+func auxToType(i interface{}) *types.Type {
+ return i.(*types.Type)
+}
+func auxToCall(i interface{}) *AuxCall {
+ return i.(*AuxCall)
+}
+func auxToS390xCCMask(i interface{}) s390x.CCMask {
+ return i.(s390x.CCMask)
+}
+func auxToS390xRotateParams(i interface{}) s390x.RotateParams {
+ return i.(s390x.RotateParams)
+}
+
+func stringToAux(s string) interface{} {
+ return s
+}
+func symToAux(s Sym) interface{} {
+ return s
+}
+func callToAux(s *AuxCall) interface{} {
+ return s
+}
+func typeToAux(t *types.Type) interface{} {
+ return t
+}
+func s390xCCMaskToAux(c s390x.CCMask) interface{} {
+ return c
+}
+func s390xRotateParamsToAux(r s390x.RotateParams) interface{} {
+ return r
+}
+
+// uaddOvf reports whether unsigned a+b would overflow.
+func uaddOvf(a, b int64) bool {
+ return uint64(a)+uint64(b) < uint64(a)
+}
+
+// de-virtualize an InterCall
+// 'sym' is the symbol for the itab
+func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall {
+ f := v.Block.Func
+ n, ok := sym.(*obj.LSym)
+ if !ok {
+ return nil
+ }
+ lsym := f.fe.DerefItab(n, offset)
+ if f.pass.debug > 0 {
+ if lsym != nil {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ } else {
+ f.Warnl(v.Pos, "couldn't de-virtualize call")
+ }
+ }
+ if lsym == nil {
+ return nil
+ }
+ va := aux.(*AuxCall)
+ return StaticAuxCall(lsym, va.args, va.results)
+}
+
+// de-virtualize an InterLECall
+// 'sym' is the symbol for the itab
+func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym {
+ n, ok := sym.(*obj.LSym)
+ if !ok {
+ return nil
+ }
+
+ f := v.Block.Func
+ lsym := f.fe.DerefItab(n, offset)
+ if f.pass.debug > 0 {
+ if lsym != nil {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ } else {
+ f.Warnl(v.Pos, "couldn't de-virtualize call")
+ }
+ }
+ if lsym == nil {
+ return nil
+ }
+ return lsym
+}
+
+func devirtLECall(v *Value, sym *obj.LSym) *Value {
+ v.Op = OpStaticLECall
+ v.Aux.(*AuxCall).Fn = sym
+ v.RemoveArg(0)
+ return v
+}
+
+// isSamePtr reports whether p1 and p2 point to the same address.
+func isSamePtr(p1, p2 *Value) bool {
+ if p1 == p2 {
+ return true
+ }
+ if p1.Op != p2.Op {
+ return false
+ }
+ switch p1.Op {
+ case OpOffPtr:
+ return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
+ case OpAddr, OpLocalAddr:
+ // OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
+ // Checking for value equality only works after [z]cse has run.
+ return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
+ case OpAddPtr:
+ return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
+ }
+ return false
+}
+
+func isStackPtr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP || v.Op == OpLocalAddr
+}
+
+// disjoint reports whether the memory region specified by [p1:p1+n1)
+// does not overlap with [p2:p2+n2).
+// A return value of false does not imply the regions overlap.
+func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
+ if n1 == 0 || n2 == 0 {
+ return true
+ }
+ if p1 == p2 {
+ return false
+ }
+ baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
+ base, offset = ptr, 0
+ for base.Op == OpOffPtr {
+ offset += base.AuxInt
+ base = base.Args[0]
+ }
+ return base, offset
+ }
+ p1, off1 := baseAndOffset(p1)
+ p2, off2 := baseAndOffset(p2)
+ if isSamePtr(p1, p2) {
+ return !overlap(off1, n1, off2, n2)
+ }
+ // p1 and p2 are not the same, so if they are both OpAddrs then
+ // they point to different variables.
+ // If one pointer is on the stack and the other is an argument
+ // then they can't overlap.
+ switch p1.Op {
+ case OpAddr, OpLocalAddr:
+ if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP {
+ return true
+ }
+ return p2.Op == OpArg && p1.Args[0].Op == OpSP
+ case OpArg:
+ if p2.Op == OpSP || p2.Op == OpLocalAddr {
+ return true
+ }
+ case OpSP:
+ return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpSP
+ }
+ return false
+}
+
+// moveSize returns the number of bytes an aligned MOV instruction moves
+func moveSize(align int64, c *Config) int64 {
+ switch {
+ case align%8 == 0 && c.PtrSize == 8:
+ return 8
+ case align%4 == 0:
+ return 4
+ case align%2 == 0:
+ return 2
+ }
+ return 1
+}
+
+// mergePoint finds a block among a's blocks which dominates b and is itself
+// dominated by all of a's blocks. Returns nil if it can't find one.
+// Might return nil even if one does exist.
+func mergePoint(b *Block, a ...*Value) *Block {
+ // Walk backward from b looking for one of the a's blocks.
+
+ // Max distance
+ d := 100
+
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ goto found
+ }
+ }
+ if len(b.Preds) > 1 {
+ // Don't know which way to go back. Abort.
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+ }
+ return nil // too far away
+found:
+ // At this point, b is the first of a's blocks that we found by walking backward.
+ // If we return anything, it will be this block, saved here as r.
+ r := b
+
+ // Keep going, counting the other a's that we find. They must all dominate r.
+ na := 0
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ na++
+ }
+ }
+ if na == len(a) {
+ // Found all of a in a backwards walk. We can return r.
+ return r
+ }
+ if len(b.Preds) > 1 {
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+
+ }
+ return nil // too far away
+}
+
+// clobber invalidates values. Returns true.
+// clobber is used by rewrite rules to:
+// A) make sure the values are really dead and never used again.
+// B) decrement use counts of the values' args.
+func clobber(vv ...*Value) bool {
+ for _, v := range vv {
+ v.reset(OpInvalid)
+ // Note: leave v.Block intact. The Block field is used after clobber.
+ }
+ return true
+}
+
+// clobberIfDead resets v when use count is 1. Returns true.
+// clobberIfDead is used by rewrite rules to decrement
+// use counts of v's args when v is dead and never used.
+func clobberIfDead(v *Value) bool {
+ if v.Uses == 1 {
+ v.reset(OpInvalid)
+ }
+ // Note: leave v.Block intact. The Block field is used after clobberIfDead.
+ return true
+}
+
+// noteRule is an easy way to track if a rule is matched when writing
+// new ones. Make the rule of interest also conditional on
+// noteRule("note to self: rule of interest matched")
+// and that message will print when the rule matches.
+func noteRule(s string) bool {
+ fmt.Println(s)
+ return true
+}
+
+// countRule increments Func.ruleMatches[key].
+// If Func.ruleMatches is non-nil at the end
+// of compilation, it will be printed to stdout.
+// This is intended to make it easier to find functions that contain
+// lots of rule matches when developing new rules.
+func countRule(v *Value, key string) bool {
+ f := v.Block.Func
+ if f.ruleMatches == nil {
+ f.ruleMatches = make(map[string]int)
+ }
+ f.ruleMatches[key]++
+ return true
+}
+
+// warnRule generates compiler debug output with string s when
+// v is not in autogenerated code, cond is true and the rule has fired.
+func warnRule(cond bool, v *Value, s string) bool {
+ if pos := v.Pos; pos.Line() > 1 && cond {
+ v.Block.Func.Warnl(pos, s)
+ }
+ return true
+}
+
+// for a pseudo-op like (LessThan x), extract x
+func flagArg(v *Value) *Value {
+ if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
+ return nil
+ }
+ return v.Args[0]
+}
+
+// arm64Negate finds the complement to an ARM64 condition code,
+// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
+//
+// For floating point, it's more subtle because NaN is unordered. We do
+// !LessThanF -> NotLessThanF, the latter takes care of NaNs.
+func arm64Negate(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterEqual
+ case OpARM64LessThanU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterThan:
+ return OpARM64LessEqual
+ case OpARM64GreaterThanU:
+ return OpARM64LessEqualU
+ case OpARM64LessEqual:
+ return OpARM64GreaterThan
+ case OpARM64LessEqualU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterEqual:
+ return OpARM64LessThan
+ case OpARM64GreaterEqualU:
+ return OpARM64LessThanU
+ case OpARM64Equal:
+ return OpARM64NotEqual
+ case OpARM64NotEqual:
+ return OpARM64Equal
+ case OpARM64LessThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64NotLessEqualF
+ case OpARM64NotLessEqualF:
+ return OpARM64LessEqualF
+ case OpARM64GreaterThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64GreaterEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// arm64Invert evaluates (InvertFlags op), which
+// is the same as altering the condition codes such
+// that the same result would be produced if the arguments
+// to the flag-generating instruction were reversed, e.g.
+// (InvertFlags (CMP x y)) -> (CMP y x)
+func arm64Invert(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterThan
+ case OpARM64LessThanU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterThan:
+ return OpARM64LessThan
+ case OpARM64GreaterThanU:
+ return OpARM64LessThanU
+ case OpARM64LessEqual:
+ return OpARM64GreaterEqual
+ case OpARM64LessEqualU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterEqual:
+ return OpARM64LessEqual
+ case OpARM64GreaterEqualU:
+ return OpARM64LessEqualU
+ case OpARM64Equal, OpARM64NotEqual:
+ return op
+ case OpARM64LessThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64GreaterEqualF
+ case OpARM64GreaterEqualF:
+ return OpARM64LessEqualF
+ case OpARM64NotLessThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64NotLessEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// evaluate an ARM64 op against a flags value
+// that is potentially constant; return 1 for true,
+// -1 for false, and 0 for not constant.
+func ccARM64Eval(op Op, flags *Value) int {
+ fop := flags.Op
+ if fop == OpARM64InvertFlags {
+ return -ccARM64Eval(op, flags.Args[0])
+ }
+ if fop != OpARM64FlagConstant {
+ return 0
+ }
+ fc := flagConstant(flags.AuxInt)
+ b2i := func(b bool) int {
+ if b {
+ return 1
+ }
+ return -1
+ }
+ switch op {
+ case OpARM64Equal:
+ return b2i(fc.eq())
+ case OpARM64NotEqual:
+ return b2i(fc.ne())
+ case OpARM64LessThan:
+ return b2i(fc.lt())
+ case OpARM64LessThanU:
+ return b2i(fc.ult())
+ case OpARM64GreaterThan:
+ return b2i(fc.gt())
+ case OpARM64GreaterThanU:
+ return b2i(fc.ugt())
+ case OpARM64LessEqual:
+ return b2i(fc.le())
+ case OpARM64LessEqualU:
+ return b2i(fc.ule())
+ case OpARM64GreaterEqual:
+ return b2i(fc.ge())
+ case OpARM64GreaterEqualU:
+ return b2i(fc.uge())
+ }
+ return 0
+}
+
+// logRule logs the use of the rule s. This will only be enabled if
+// rewrite rules were generated with the -log option; see gen/rulegen.go.
+func logRule(s string) {
+ if ruleFile == nil {
+ // Open a log file to write log to. We open in append
+ // mode because all.bash runs the compiler lots of times,
+ // and we want the concatenation of all of those logs.
+ // This means, of course, that users need to rm the old log
+ // to get fresh data.
+ // TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
+ w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
+ os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ panic(err)
+ }
+ ruleFile = w
+ }
+ _, err := fmt.Fprintln(ruleFile, s)
+ if err != nil {
+ panic(err)
+ }
+}
+
+var ruleFile io.Writer
+
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func isConstZero(v *Value) bool {
+ switch v.Op {
+ case OpConstNil:
+ return true
+ case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
+ return v.AuxInt == 0
+ }
+ return false
+}
+
+// reciprocalExact64 reports whether 1/c is exactly representable.
+func reciprocalExact64(c float64) bool {
+ b := math.Float64bits(c)
+ man := b & (1<<52 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 52 & (1<<11 - 1)
+ // exponent bias is 0x3ff. So taking the reciprocal of a number
+ // changes the exponent to 0x7fe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0x7ff:
+ return false // ±inf
+ case 0x7fe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
+
+// reciprocalExact32 reports whether 1/c is exactly representable.
+func reciprocalExact32(c float32) bool {
+ b := math.Float32bits(c)
+ man := b & (1<<23 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 23 & (1<<8 - 1)
+ // exponent bias is 0x7f. So taking the reciprocal of a number
+ // changes the exponent to 0xfe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0xff:
+ return false // ±inf
+ case 0xfe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
+
+// check if an immediate can be directly encoded into an ARM instruction
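+// (that is, an 8 bit value rotated right by an even number of bits).
+// For example, 0xff000000 can be encoded this way, while 0x101 cannot.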
+func isARMImmRot(v uint32) bool {
+ for i := 0; i < 16; i++ {
+ if v&^0xff == 0 {
+ return true
+ }
+ v = v<<2 | v>>30
+ }
+
+ return false
+}
+
+// overlap reports whether the ranges given by the given offset and
+// size pairs overlap.
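+// For example, overlap(0, 8, 4, 8) is true because [0,8) and [4,12) share
+// bytes 4 through 7, while overlap(0, 4, 4, 4) is false.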
+func overlap(offset1, size1, offset2, size2 int64) bool {
+ if offset1 >= offset2 && offset2+size2 > offset1 {
+ return true
+ }
+ if offset2 >= offset1 && offset1+size1 > offset2 {
+ return true
+ }
+ return false
+}
+
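+// areAdjacentOffsets reports whether the regions [off1,off1+size) and
+// [off2,off2+size) are adjacent to each other, in either order.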
+func areAdjacentOffsets(off1, off2, size int64) bool {
+ return off1+size == off2 || off1 == off2+size
+}
+
+// check if the value zeroes out the upper 32 bits of a 64-bit register.
+// depth limits the recursion depth. In AMD64.rules, 3 is used as the limit
+// because it catches the same number of cases as 4.
+func zeroUpper32Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1,
+ OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1,
+ OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload,
+ OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL,
+ OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst,
+ OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst,
+ OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL,
+ OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
+ OpAMD64SHLL, OpAMD64SHLLconst:
+ return true
+ case OpArg:
+ return x.Type.Width == 4
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper32Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits
+func zeroUpper48Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
+ return true
+ case OpArg:
+ return x.Type.Width == 2
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper48Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits
+func zeroUpper56Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
+ return true
+ case OpArg:
+ return x.Type.Width == 1
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper56Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// isInlinableMemmove reports whether the given arch performs a Move of the given size
+// faster than memmove. It will only return true if replacing the memmove with a Move is
+// safe, either because Move is small or because the arguments are disjoint.
+// This is used as a check for replacing memmove with Move ops.
+func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ // It is always safe to convert memmove into Move when its arguments are disjoint.
+ // Move ops may or may not be faster for large sizes depending on how the platform
+ // lowers them, so we only perform this optimization on platforms that we know to
+ // have fast Move ops.
+ switch c.arch {
+ case "amd64":
+ return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
+ case "386", "arm64":
+ return sz <= 8
+ case "s390x", "ppc64", "ppc64le":
+ return sz <= 8 || disjoint(dst, sz, src, sz)
+ case "arm", "mips", "mips64", "mipsle", "mips64le":
+ return sz <= 4
+ }
+ return false
+}
+
+// logLargeCopy logs the occurrence of a large copy.
+// The best place to do this is in the rewrite rules where the size of the move is easy to find.
+// "Large" is arbitrarily chosen to be 128 bytes; this may change.
+func logLargeCopy(v *Value, s int64) bool {
+ if s < 128 {
+ return true
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "copy", "lower", v.Block.Func.Name, fmt.Sprintf("%d bytes", s))
+ }
+ return true
+}
+
+// hasSmallRotate reports whether the architecture has rotate instructions
+// for sizes < 32-bit. This is used to decide whether to promote some rotations.
+func hasSmallRotate(c *Config) bool {
+ switch c.arch {
+ case "amd64", "386":
+ return true
+ default:
+ return false
+ }
+}
+
+func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {
+ if sh < 0 || sh >= sz {
+ panic("PPC64 shift arg sh out of range")
+ }
+ if mb < 0 || mb >= sz {
+ panic("PPC64 shift arg mb out of range")
+ }
+ if me < 0 || me >= sz {
+ panic("PPC64 shift arg me out of range")
+ }
+ return int32(sh<<16 | mb<<8 | me)
+}
+
+func GetPPC64Shiftsh(auxint int64) int64 {
+ return int64(int8(auxint >> 16))
+}
+
+func GetPPC64Shiftmb(auxint int64) int64 {
+ return int64(int8(auxint >> 8))
+}
+
+func GetPPC64Shiftme(auxint int64) int64 {
+ return int64(int8(auxint))
+}
+
+// Test if this value can be encoded as a mask for an rlwinm-like
+// operation. Masks can also extend from the msb and wrap to
+// the lsb. That is, the valid masks are 32 bit strings
+// of the form: 0..01..10..0 or 1..10..01..1 or 1...1
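+// For example, 0x00ff0000 and 0xff0000ff are valid masks here, while
+// 0x0f0f0000 is not.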
+func isPPC64WordRotateMask(v64 int64) bool {
+ // Isolate rightmost 1 (if none 0) and add.
+ v := uint32(v64)
+ vp := (v & -v) + v
+ // Likewise, for the wrapping case.
+ vn := ^v
+ vpn := (vn & -vn) + vn
+ return (v&vp == 0 || vn&vpn == 0) && v != 0
+}
+
+// Compress mask and shift into a single value of the form
+// me | mb<<8 | rotate<<16 | nbits<<24, where me and mb can
+// be used to regenerate the input mask.
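+// For example, encodePPC64RotateMask(0, 0xF, 32) records mb=28 and me=32;
+// DecodePPC64RotateMask then returns rotate=0, mb=28, me=31 (inclusive)
+// and mask=0xF.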
+func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
+ var mb, me, mbn, men int
+
+ // Determine boundaries and then decode them
+ if mask == 0 || ^mask == 0 || rotate >= nbits {
+ panic("Invalid PPC64 rotate mask")
+ } else if nbits == 32 {
+ mb = bits.LeadingZeros32(uint32(mask))
+ me = 32 - bits.TrailingZeros32(uint32(mask))
+ mbn = bits.LeadingZeros32(^uint32(mask))
+ men = 32 - bits.TrailingZeros32(^uint32(mask))
+ } else {
+ mb = bits.LeadingZeros64(uint64(mask))
+ me = 64 - bits.TrailingZeros64(uint64(mask))
+ mbn = bits.LeadingZeros64(^uint64(mask))
+ men = 64 - bits.TrailingZeros64(^uint64(mask))
+ }
+ // Check for a wrapping mask (e.g. bits at 0 and 63)
+ if mb == 0 && me == int(nbits) {
+ // swap the inverted values
+ mb, me = men, mbn
+ }
+
+ return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+}
+
+// The inverse operation of encodePPC64RotateMask. The values returned as
+// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
+func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
+ auxint := uint64(sauxint)
+ rotate = int64((auxint >> 16) & 0xFF)
+ mb = int64((auxint >> 8) & 0xFF)
+ me = int64((auxint >> 0) & 0xFF)
+ nbits := int64((auxint >> 24) & 0xFF)
+ mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1)
+ if mb > me {
+ mask = ^mask
+ }
+ if nbits == 32 {
+ mask = uint64(uint32(mask))
+ }
+
+ // Fixup ME to match ISA definition. The second argument to MASK(..,me)
+ // is inclusive.
+ me = (me - 1) & (nbits - 1)
+ return
+}
+
+// This verifies that the mask is a set of
+// consecutive bits including the least
+// significant bit.
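+// For example, 0x07 (0b111) is a valid mask and getPPC64ShiftMaskLength
+// returns 3 for it, while 0x06 (0b110) is not valid.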
+func isPPC64ValidShiftMask(v int64) bool {
+ if (v != 0) && ((v+1)&v) == 0 {
+ return true
+ }
+ return false
+}
+
+func getPPC64ShiftMaskLength(v int64) int64 {
+ return int64(bits.Len64(uint64(v)))
+}
+
+// Decompose a shift right into an equivalent rotate/mask,
+// and return mask & m.
+func mergePPC64RShiftMask(m, s, nbits int64) int64 {
+ smask := uint64((1<<uint(nbits))-1) >> uint(s)
+ return m & int64(smask)
+}
+
+// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
+func mergePPC64AndSrwi(m, s int64) int64 {
+ mask := mergePPC64RShiftMask(m, s, 32)
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask((32-s)&31, mask, 32)
+}
+
+// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Return the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
+ mask_1 := uint64(0xFFFFFFFF >> uint(srw))
+ // For CLRLSLDI, it is more convenient to think of it as masking the left bits, then rotating left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // Rewrite mask to apply after the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
+
+ r_1 := 32 - srw
+ r_2 := GetPPC64Shiftsh(sld)
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
+ return 0
+ }
+ return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+}
+
+// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
+// the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
+ r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
+ // For CLRLSLDI, it is more convenient to think of it as masking the left bits, then rotating left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // combine the masks, and adjust for the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld)))
+ r_2 := GetPPC64Shiftsh(int64(sld))
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
+// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
+// or return 0 if they cannot be combined.
+func mergePPC64SldiSrw(sld, srw int64) int64 {
+ if sld > srw || srw >= 32 {
+ return 0
+ }
+ mask_r := uint32(0xFFFFFFFF) >> uint(srw)
+ mask_l := uint32(0xFFFFFFFF) >> uint(sld)
+ mask := (mask_r & mask_l) << uint(sld)
+ return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
+}
+
+// Convenience function to rotate a 32 bit constant value by another constant.
+func rotateLeft32(v, rotate int64) int64 {
+ return int64(bits.RotateLeft32(uint32(v), int(rotate)))
+}
+
+// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
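+// For example, armBFAuxInt(8, 16) encodes lsb 8 and width 16 as 0x810;
+// getARM64BFlsb and getARM64BFwidth recover 8 and 16 from that encoding.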
+func armBFAuxInt(lsb, width int64) arm64BitField {
+ if lsb < 0 || lsb > 63 {
+ panic("ARM(64) bit field lsb constant out of range")
+ }
+ if width < 1 || width > 64 {
+ panic("ARM(64) bit field width constant out of range")
+ }
+ return arm64BitField(width | lsb<<8)
+}
+
+// returns the lsb part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFlsb() int64 {
+ return int64(uint64(bfc) >> 8)
+}
+
+// returns the width part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFwidth() int64 {
+ return int64(bfc) & 0xff
+}
+
+// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
+func isARM64BFMask(lsb, mask, rshift int64) bool {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
+}
+
+// returns the bitfield width of mask >> rshift for arm64 bitfield ops
+func arm64BFWidth(mask, rshift int64) int64 {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ if shiftedMask == 0 {
+ panic("ARM64 BF mask is zero")
+ }
+ return nto(shiftedMask)
+}
+
+// sizeof returns the size of t in bytes.
+// It will panic if t is not a *types.Type.
+func sizeof(t interface{}) int64 {
+ return t.(*types.Type).Size()
+}
+
+// registerizable reports whether t is a primitive type that fits in
+// a register. It assumes float64 values will always fit into registers
+// even if that isn't strictly true.
+func registerizable(b *Block, typ *types.Type) bool {
+ if typ.IsPtrShaped() || typ.IsFloat() {
+ return true
+ }
+ if typ.IsInteger() {
+ return typ.Size() <= b.Func.Config.RegSize
+ }
+ return false
+}
+
+// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed.
+func needRaceCleanup(sym *AuxCall, v *Value) bool {
+ f := v.Block.Func
+ if !f.Config.Race {
+ return false
+ }
+ if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncenterfp") && !isSameCall(sym, "runtime.racefuncexit") {
+ return false
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticCall:
+ // The scan for racefuncenter/racefuncenterfp will also encounter the
+ // matching racefuncexit (and vice versa), so allow those.
+ // Also allow calls to the panic* helpers.
+ s := v.Aux.(*AuxCall).Fn.String()
+ switch s {
+ case "runtime.racefuncenter", "runtime.racefuncenterfp", "runtime.racefuncexit",
+ "runtime.panicdivide", "runtime.panicwrap",
+ "runtime.panicshift":
+ continue
+ }
+ // If we encountered any call, we need to keep racefunc*,
+ // for accurate stacktraces.
+ return false
+ case OpPanicBounds, OpPanicExtend:
+ // Note: these are panic generators that are ok (like the static calls above).
+ case OpClosureCall, OpInterCall:
+ // We must keep the race functions if there are any other call types.
+ return false
+ }
+ }
+ }
+ if isSameCall(sym, "runtime.racefuncenter") {
+ // If we're removing racefuncenter, remove its argument as well.
+ if v.Args[0].Op != OpStore {
+ return false
+ }
+ mem := v.Args[0].Args[2]
+ v.Args[0].reset(OpCopy)
+ v.Args[0].AddArg(mem)
+ }
+ return true
+}
+
+// symIsRO reports whether sym is a read-only global.
+func symIsRO(sym interface{}) bool {
+ lsym := sym.(*obj.LSym)
+ return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
+}
+
+// symIsROZero reports whether sym is a read-only global whose data contains all zeros.
+func symIsROZero(sym Sym) bool {
+ lsym := sym.(*obj.LSym)
+ if lsym.Type != objabi.SRODATA || len(lsym.R) != 0 {
+ return false
+ }
+ for _, b := range lsym.P {
+ if b != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// read8 reads one byte from the read-only global sym at offset off.
+func read8(sym interface{}, off int64) uint8 {
+ lsym := sym.(*obj.LSym)
+ if off >= int64(len(lsym.P)) || off < 0 {
+ // Invalid index into the global sym.
+ // This can happen in dead code, so we don't want to panic.
+ // Just return any value, it will eventually get ignored.
+ // See issue 29215.
+ return 0
+ }
+ return lsym.P[off]
+}
+
+// read16 reads two bytes from the read-only global sym at offset off.
+func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 {
+ lsym := sym.(*obj.LSym)
+ // lsym.P is written lazily.
+ // Bytes requested after the end of lsym.P are 0.
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 2)
+ copy(buf, src)
+ return byteorder.Uint16(buf)
+}
+
+// read32 reads four bytes from the read-only global sym at offset off.
+func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 4)
+ copy(buf, src)
+ return byteorder.Uint32(buf)
+}
+
+// read64 reads eight bytes from the read-only global sym at offset off.
+func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 8)
+ copy(buf, src)
+ return byteorder.Uint64(buf)
+}
+
+// sequentialAddresses reports whether it can prove that x + n == y.
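+// For example, if x is (ADDQ p q) and y is (LEAQ1 [8] p q) with a nil symbol,
+// then sequentialAddresses(x, y, 8) is true.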
+func sequentialAddresses(x, y *Value, n int64) bool {
+ if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64ADDQ && y.Op == OpAMD64LEAQ1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64LEAQ1 && y.Op == OpAMD64LEAQ1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ return false
+}
+
+// flagConstant represents the result of a compile-time comparison.
+// The sense of these flags does not necessarily represent the hardware's notion
+// of a flags register - these are just a compile-time construct.
+// We happen to match the semantics to those of arm/arm64.
+// Note that these semantics differ from x86: the carry flag has the opposite
+// sense on a subtraction!
+// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
+// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
+// (because it does x + ^y + C).
+// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
+type flagConstant uint8
+
+// N reports whether the result of an operation is negative (high bit set).
+func (fc flagConstant) N() bool {
+ return fc&1 != 0
+}
+
+// Z reports whether the result of an operation is 0.
+func (fc flagConstant) Z() bool {
+ return fc&2 != 0
+}
+
+// C reports whether an unsigned add overflowed (carry), or an
+// unsigned subtract did not underflow (borrow).
+func (fc flagConstant) C() bool {
+ return fc&4 != 0
+}
+
+// V reports whether a signed operation overflowed or underflowed.
+func (fc flagConstant) V() bool {
+ return fc&8 != 0
+}
+
+func (fc flagConstant) eq() bool {
+ return fc.Z()
+}
+func (fc flagConstant) ne() bool {
+ return !fc.Z()
+}
+func (fc flagConstant) lt() bool {
+ return fc.N() != fc.V()
+}
+func (fc flagConstant) le() bool {
+ return fc.Z() || fc.lt()
+}
+func (fc flagConstant) gt() bool {
+ return !fc.Z() && fc.ge()
+}
+func (fc flagConstant) ge() bool {
+ return fc.N() == fc.V()
+}
+func (fc flagConstant) ult() bool {
+ return !fc.C()
+}
+func (fc flagConstant) ule() bool {
+ return fc.Z() || fc.ult()
+}
+func (fc flagConstant) ugt() bool {
+ return !fc.Z() && fc.uge()
+}
+func (fc flagConstant) uge() bool {
+ return fc.C()
+}
+
+func (fc flagConstant) ltNoov() bool {
+ return fc.lt() && !fc.V()
+}
+func (fc flagConstant) leNoov() bool {
+ return fc.le() && !fc.V()
+}
+func (fc flagConstant) gtNoov() bool {
+ return fc.gt() && !fc.V()
+}
+func (fc flagConstant) geNoov() bool {
+ return fc.ge() && !fc.V()
+}
+
+func (fc flagConstant) String() string {
+ return fmt.Sprintf("N=%v,Z=%v,C=%v,V=%v", fc.N(), fc.Z(), fc.C(), fc.V())
+}
+
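+// flagConstantBuilder collects the four condition flags so they can be
+// packed into a flagConstant by encode (N in bit 0, Z in bit 1, C in bit 2,
+// V in bit 3).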
+type flagConstantBuilder struct {
+ N bool
+ Z bool
+ C bool
+ V bool
+}
+
+func (fcs flagConstantBuilder) encode() flagConstant {
+ var fc flagConstant
+ if fcs.N {
+ fc |= 1
+ }
+ if fcs.Z {
+ fc |= 2
+ }
+ if fcs.C {
+ fc |= 4
+ }
+ if fcs.V {
+ fc |= 8
+ }
+ return fc
+}
+
+// Note: addFlags(x,y) != subFlags(x,-y) in some situations:
+// - the results of the C flag are different
+// - the results of the V flag when y==minint are different
+
+// addFlags64 returns the flags that would be set from computing x+y.
+func addFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint64(x+y) < uint64(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags64 returns the flags that would be set from computing x-y.
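+// For example, subFlags64(0, 1) sets only N: the result -1 is negative, the
+// unsigned subtraction borrows (so C is clear), and there is no signed
+// overflow, so lt() and ult() are true for the result.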
+func subFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint64(y) <= uint64(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
+
+// addFlags32 returns the flags that would be set from computing x+y.
+func addFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint32(x+y) < uint32(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags32 returns the flags that would be set from computing x-y.
+func subFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint32(y) <= uint32(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
+
+// logicFlags64 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags64(x int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
+
+// logicFlags32 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags32(x int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
new file mode 100644
index 0000000..2acdccd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -0,0 +1,12575 @@
+// Code generated from gen/386.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValue386(v *Value) bool {
+ switch v.Op {
+ case Op386ADCL:
+ return rewriteValue386_Op386ADCL(v)
+ case Op386ADDL:
+ return rewriteValue386_Op386ADDL(v)
+ case Op386ADDLcarry:
+ return rewriteValue386_Op386ADDLcarry(v)
+ case Op386ADDLconst:
+ return rewriteValue386_Op386ADDLconst(v)
+ case Op386ADDLconstmodify:
+ return rewriteValue386_Op386ADDLconstmodify(v)
+ case Op386ADDLload:
+ return rewriteValue386_Op386ADDLload(v)
+ case Op386ADDLmodify:
+ return rewriteValue386_Op386ADDLmodify(v)
+ case Op386ADDSD:
+ return rewriteValue386_Op386ADDSD(v)
+ case Op386ADDSDload:
+ return rewriteValue386_Op386ADDSDload(v)
+ case Op386ADDSS:
+ return rewriteValue386_Op386ADDSS(v)
+ case Op386ADDSSload:
+ return rewriteValue386_Op386ADDSSload(v)
+ case Op386ANDL:
+ return rewriteValue386_Op386ANDL(v)
+ case Op386ANDLconst:
+ return rewriteValue386_Op386ANDLconst(v)
+ case Op386ANDLconstmodify:
+ return rewriteValue386_Op386ANDLconstmodify(v)
+ case Op386ANDLload:
+ return rewriteValue386_Op386ANDLload(v)
+ case Op386ANDLmodify:
+ return rewriteValue386_Op386ANDLmodify(v)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v)
+ case Op386CMPBload:
+ return rewriteValue386_Op386CMPBload(v)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v)
+ case Op386CMPLload:
+ return rewriteValue386_Op386CMPLload(v)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v)
+ case Op386CMPWload:
+ return rewriteValue386_Op386CMPWload(v)
+ case Op386DIVSD:
+ return rewriteValue386_Op386DIVSD(v)
+ case Op386DIVSDload:
+ return rewriteValue386_Op386DIVSDload(v)
+ case Op386DIVSS:
+ return rewriteValue386_Op386DIVSS(v)
+ case Op386DIVSSload:
+ return rewriteValue386_Op386DIVSSload(v)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v)
+ case Op386MOVSDconst:
+ return rewriteValue386_Op386MOVSDconst(v)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v)
+ case Op386MOVSSconst:
+ return rewriteValue386_Op386MOVSSconst(v)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v)
+ case Op386MULLload:
+ return rewriteValue386_Op386MULLload(v)
+ case Op386MULSD:
+ return rewriteValue386_Op386MULSD(v)
+ case Op386MULSDload:
+ return rewriteValue386_Op386MULSDload(v)
+ case Op386MULSS:
+ return rewriteValue386_Op386MULSS(v)
+ case Op386MULSSload:
+ return rewriteValue386_Op386MULSSload(v)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v)
+ case Op386ORLconstmodify:
+ return rewriteValue386_Op386ORLconstmodify(v)
+ case Op386ORLload:
+ return rewriteValue386_Op386ORLload(v)
+ case Op386ORLmodify:
+ return rewriteValue386_Op386ORLmodify(v)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v)
+ case Op386SHLLconst:
+ return rewriteValue386_Op386SHLLconst(v)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v)
+ case Op386SHRBconst:
+ return rewriteValue386_Op386SHRBconst(v)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v)
+ case Op386SHRLconst:
+ return rewriteValue386_Op386SHRLconst(v)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v)
+ case Op386SHRWconst:
+ return rewriteValue386_Op386SHRWconst(v)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v)
+ case Op386SUBLload:
+ return rewriteValue386_Op386SUBLload(v)
+ case Op386SUBLmodify:
+ return rewriteValue386_Op386SUBLmodify(v)
+ case Op386SUBSD:
+ return rewriteValue386_Op386SUBSD(v)
+ case Op386SUBSDload:
+ return rewriteValue386_Op386SUBSDload(v)
+ case Op386SUBSS:
+ return rewriteValue386_Op386SUBSS(v)
+ case Op386SUBSSload:
+ return rewriteValue386_Op386SUBSSload(v)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v)
+ case Op386XORLconstmodify:
+ return rewriteValue386_Op386XORLconstmodify(v)
+ case Op386XORLload:
+ return rewriteValue386_Op386XORLload(v)
+ case Op386XORLmodify:
+ return rewriteValue386_Op386XORLmodify(v)
+ case OpAdd16:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32F:
+ v.Op = Op386ADDSS
+ return true
+ case OpAdd32carry:
+ v.Op = Op386ADDLcarry
+ return true
+ case OpAdd32withcarry:
+ v.Op = Op386ADCL
+ return true
+ case OpAdd64F:
+ v.Op = Op386ADDSD
+ return true
+ case OpAdd8:
+ v.Op = Op386ADDL
+ return true
+ case OpAddPtr:
+ v.Op = Op386ADDL
+ return true
+ case OpAddr:
+ return rewriteValue386_OpAddr(v)
+ case OpAnd16:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd32:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd8:
+ v.Op = Op386ANDL
+ return true
+ case OpAndB:
+ v.Op = Op386ANDL
+ return true
+ case OpAvg32u:
+ v.Op = Op386AVGLU
+ return true
+ case OpBswap32:
+ v.Op = Op386BSWAPL
+ return true
+ case OpClosureCall:
+ v.Op = Op386CALLclosure
+ return true
+ case OpCom16:
+ v.Op = Op386NOTL
+ return true
+ case OpCom32:
+ v.Op = Op386NOTL
+ return true
+ case OpCom8:
+ v.Op = Op386NOTL
+ return true
+ case OpConst16:
+ return rewriteValue386_OpConst16(v)
+ case OpConst32:
+ v.Op = Op386MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = Op386MOVSSconst
+ return true
+ case OpConst64F:
+ v.Op = Op386MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValue386_OpConst8(v)
+ case OpConstBool:
+ return rewriteValue386_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValue386_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValue386_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = Op386BSFL
+ return true
+ case OpCvt32Fto32:
+ v.Op = Op386CVTTSS2SL
+ return true
+ case OpCvt32Fto64F:
+ v.Op = Op386CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = Op386CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = Op386CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = Op386CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = Op386CVTSD2SS
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ v.Op = Op386DIVW
+ return true
+ case OpDiv16u:
+ v.Op = Op386DIVWU
+ return true
+ case OpDiv32:
+ v.Op = Op386DIVL
+ return true
+ case OpDiv32F:
+ v.Op = Op386DIVSS
+ return true
+ case OpDiv32u:
+ v.Op = Op386DIVLU
+ return true
+ case OpDiv64F:
+ v.Op = Op386DIVSD
+ return true
+ case OpDiv8:
+ return rewriteValue386_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValue386_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValue386_OpEq16(v)
+ case OpEq32:
+ return rewriteValue386_OpEq32(v)
+ case OpEq32F:
+ return rewriteValue386_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValue386_OpEq64F(v)
+ case OpEq8:
+ return rewriteValue386_OpEq8(v)
+ case OpEqB:
+ return rewriteValue386_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValue386_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = Op386LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = Op386LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = Op386LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = Op386LoweredGetG
+ return true
+ case OpHmul32:
+ v.Op = Op386HMULL
+ return true
+ case OpHmul32u:
+ v.Op = Op386HMULLU
+ return true
+ case OpInterCall:
+ v.Op = Op386CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValue386_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValue386_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValue386_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValue386_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValue386_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValue386_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValue386_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValue386_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValue386_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValue386_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValue386_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValue386_OpLess16(v)
+ case OpLess16U:
+ return rewriteValue386_OpLess16U(v)
+ case OpLess32:
+ return rewriteValue386_OpLess32(v)
+ case OpLess32F:
+ return rewriteValue386_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValue386_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValue386_OpLess64F(v)
+ case OpLess8:
+ return rewriteValue386_OpLess8(v)
+ case OpLess8U:
+ return rewriteValue386_OpLess8U(v)
+ case OpLoad:
+ return rewriteValue386_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValue386_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValue386_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValue386_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValue386_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValue386_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValue386_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValue386_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValue386_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValue386_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValue386_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValue386_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValue386_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValue386_OpLsh8x8(v)
+ case OpMod16:
+ v.Op = Op386MODW
+ return true
+ case OpMod16u:
+ v.Op = Op386MODWU
+ return true
+ case OpMod32:
+ v.Op = Op386MODL
+ return true
+ case OpMod32u:
+ v.Op = Op386MODLU
+ return true
+ case OpMod8:
+ return rewriteValue386_OpMod8(v)
+ case OpMod8u:
+ return rewriteValue386_OpMod8u(v)
+ case OpMove:
+ return rewriteValue386_OpMove(v)
+ case OpMul16:
+ v.Op = Op386MULL
+ return true
+ case OpMul32:
+ v.Op = Op386MULL
+ return true
+ case OpMul32F:
+ v.Op = Op386MULSS
+ return true
+ case OpMul32uhilo:
+ v.Op = Op386MULLQU
+ return true
+ case OpMul64F:
+ v.Op = Op386MULSD
+ return true
+ case OpMul8:
+ v.Op = Op386MULL
+ return true
+ case OpNeg16:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValue386_OpNeg32F(v)
+ case OpNeg64F:
+ return rewriteValue386_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = Op386NEGL
+ return true
+ case OpNeq16:
+ return rewriteValue386_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValue386_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValue386_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValue386_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValue386_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValue386_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValue386_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = Op386LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValue386_OpNot(v)
+ case OpOffPtr:
+ return rewriteValue386_OpOffPtr(v)
+ case OpOr16:
+ v.Op = Op386ORL
+ return true
+ case OpOr32:
+ v.Op = Op386ORL
+ return true
+ case OpOr8:
+ v.Op = Op386ORL
+ return true
+ case OpOrB:
+ v.Op = Op386ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValue386_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValue386_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValue386_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValue386_OpRotateLeft32(v)
+ case OpRotateLeft8:
+ return rewriteValue386_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValue386_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValue386_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValue386_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValue386_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValue386_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValue386_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValue386_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValue386_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValue386_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValue386_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValue386_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValue386_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValue386_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValue386_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValue386_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValue386_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValue386_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValue386_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValue386_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValue386_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValue386_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValue386_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValue386_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValue386_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValue386_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValue386_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = Op386MOVWLSX
+ return true
+ case OpSignExt8to16:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignExt8to32:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignmask:
+ return rewriteValue386_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValue386_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = Op386SQRTSD
+ return true
+ case OpStaticCall:
+ v.Op = Op386CALLstatic
+ return true
+ case OpStore:
+ return rewriteValue386_OpStore(v)
+ case OpSub16:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32F:
+ v.Op = Op386SUBSS
+ return true
+ case OpSub32carry:
+ v.Op = Op386SUBLcarry
+ return true
+ case OpSub32withcarry:
+ v.Op = Op386SBBL
+ return true
+ case OpSub64F:
+ v.Op = Op386SUBSD
+ return true
+ case OpSub8:
+ v.Op = Op386SUBL
+ return true
+ case OpSubPtr:
+ v.Op = Op386SUBL
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = Op386LoweredWB
+ return true
+ case OpXor16:
+ v.Op = Op386XORL
+ return true
+ case OpXor32:
+ v.Op = Op386XORL
+ return true
+ case OpXor8:
+ v.Op = Op386XORL
+ return true
+ case OpZero:
+ return rewriteValue386_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = Op386MOVWLZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeromask:
+ return rewriteValue386_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValue386_Op386ADCL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCL x (MOVLconst [c]) f)
+ // result: (ADCLconst [c] x f)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386ADCLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst [c]))
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(Op386LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLcarry x (MOVLconst [c]))
+ // result: (ADDLconstcarry [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ADDLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] x:(SP))
+ // result: (LEAL [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int8(c)),int64(off))
+ // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(int8(c)), int64(off))) {
+ break
+ }
+ v.reset(Op386CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386SHRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(c),int64(off))
+ // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(Op386CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff32(int32(c),int32(off))] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), int32(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int16(c)),int64(off))
+ // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(int16(c)), int64(off))) {
+ break
+ }
+ v.reset(Op386CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
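+	// Constant offsets fold directly into the LEAL displacement as long as the sum still fits
+	// in 32 bits (is32Bit).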
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
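+	// A constant left shift of the index by 1, 2, or 3 is absorbed into the scale factor
+	// (this rule and the two rules below: LEAL2, LEAL4, LEAL8).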
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386LEAL {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAL1 [0] {nil} x y)
+ // result: (ADDL x y)
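+	// With no symbol and zero displacement, LEAL1 is just an addition.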
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || auxToSym(v.Aux) != nil {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(Op386ADDL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
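+	// Because the index y is scaled by 2, its constant offset d contributes 2*d to the displacement.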
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2))
+ // result: (LEAL4 [off1+2*off2] {sym} x y)
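+	// (LEAL1 [off2] {nil} y y) computes 2*y+off2, so the index scale doubles to 4 and off2 is
+	// counted twice in the displacement.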
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 2*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2))
+ // result: (LEAL8 [off1+4*off2] {sym} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 4*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
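+	// The sign extension is folded into the load itself; the replacement is created in the
+	// load's block (@x.Block) so it can stand in for the load.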
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
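+	// If bit 7 of the mask is clear, the masked value is already non-negative as a byte, so the
+	// sign extension is a no-op and the mask can be narrowed to 7 bits.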
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
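+	// A load from read-only data at a known offset is constant-folded at compile time via read8.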
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(int64(off))
+ // result: (MOVBstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validOff(int64(off))) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
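+	// Two adjacent byte stores of the low and high bytes of w collapse into one 16-bit store
+	// (x86 is little-endian, so the lower address holds the low byte).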
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRWconst || auxIntToInt16(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst || auxIntToInt16(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst || auxIntToInt32(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRWconst || auxIntToInt16(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst || auxIntToInt16(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst || auxIntToInt32(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
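+	// Two constant byte stores at consecutive offsets merge into one 16-bit constant store;
+	// the combined value is assembled as a.Val()&0xff (low byte) | c.Val()<<8 (high byte).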
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(a.Val()&0xff | c.Val()<<8), a.Off32())] {s} p0 mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xff|c.Val()<<8), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
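+	// Store-to-load forwarding: a load that reads back exactly what the preceding store wrote
+	// to the same address is replaced by the stored value.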
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(int64(off))
+ // result: (MOVLstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validOff(int64(off))) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
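+	// A load-op-store sequence on the same address collapses into a single read-modify-write
+	// instruction operating directly on memory.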
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ADDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ANDLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (ORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) && validValAndOff(int64(c),int64(off))
+ // result: (XORLconstmodify [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l) && validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSDconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSDconst2 (MOVSDconst1 [c]))
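+	// Under -shared (position-independent code) the constant cannot be addressed directly:
+	// MOVSDconst1 materializes the constant's address and MOVSDconst2 loads through it.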
+ for {
+ c := auxIntToFloat64(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSDconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
+ v0.AuxInt = float64ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSSconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSSconst2 (MOVSSconst1 [c]))
+ for {
+ c := auxIntToFloat32(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSSconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
+ v0.AuxInt = float32ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
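+	// The rule above folds the sign extension into the load: when the MOVWload
+	// has no other uses, the pair is replaced by a single MOVWLSXload emitted in
+	// the load's original block (b = x.Block), preserving the load's position.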
+ // match: (MOVWLSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(int64(off))
+ // result: (MOVWstoreconst [makeValAndOff32(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validOff(int64(off))) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
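+	// The rule above merges two adjacent halfword stores of w and w>>16 (at
+	// offsets i-2 and i) into one MOVLstore of w at i-2; on the little-endian
+	// 386 the low halfword of w lands at the lower address, so the bytes
+	// written are identical.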
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
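+	// The rule above merges two constant halfword stores at offsets a.Off() and
+	// a.Off()+2 into one MOVLstoreconst of a.Val()&0xffff | c.Val()<<16, which
+	// writes the same four bytes on the little-endian 386.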
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+	// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(a.Val()&0xffff | c.Val()<<16), a.Off32())] {s} p0 mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(a.Val()&0xffff|c.Val()<<16), a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
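+	// In the LEA-based rules here, LEAL2 x x computes x+2*x = 3*x and
+	// LEAL8 y y computes y+8*y = 9*y, so e.g. the [27] case above becomes
+	// 9*(3*x) = 27*x using two LEAs instead of a multiply.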
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c+1) && c >= 15) {
+ break
+ }
+ v.reset(Op386SUBL)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
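+	// For example c=15: 15+1 = 16 = 1<<4, so the rule above produces
+	// (SUBL (SHLLconst [4] x) x), i.e. 16*x - x.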
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
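+	// For example c=48: 48 = 3*16 with 16 a power of two, so the c%3 rule
+	// above produces (SHLLconst [4] (LEAL2 x x)), i.e. (3*x)<<4 = 48*x.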
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+	// match: (ORL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != Op386MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s0 := v_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
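+	// The rule above recognizes b0 | (b1 << 8) built from byte loads at i0 and
+	// i0+1 and replaces it with a single little-endian MOVWload at i0, emitted
+	// at the merge point of the two loads.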
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != Op386MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := v_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
+ // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != Op386ORL {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ x0 := o0_0
+ if x0.Op != Op386MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s0 := o0_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := v_1
+ if s1.Op != Op386SHLLconst || auxIntToInt32(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p0 mem) s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem))) s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != Op386ORL {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ x0 := o0_0
+ if x0.Op != Op386MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := o0_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ s1 := v_1
+ if s1.Op != Op386SHLLconst || auxIntToInt32(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p2 := x2.Args[0]
+ if mem != x2.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+	// result: (ROLBconst [(c+d)&7] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386ROLBconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ROLLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386ROLWconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt((c + d) & 15)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c&31),7))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c&31), 7)))
+ v.AddArg(x)
+ return true
+ }
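+	// The constant count is capped at 7: an arithmetic right shift of an 8-bit
+	// value by 7 already replicates the sign bit into every position, so larger
+	// (masked) counts produce the same result.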
+ return false
+}
+func rewriteValue386_Op386SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ANDLconst [31] y))
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int16(min(int64(c&31),15))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(min(int64(c&31), 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBL x (MOVLconst [c]) f)
+ // result: (SBBLconst [c] x f)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386SBBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
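+	// Taken together, these rules fold SBBLcarrymask to -1 exactly for the flag
+	// states that encode an unsigned "below" result (FlagLT_ULT, FlagGT_ULT)
+	// and to 0 otherwise.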
+ return false
+}
+func rewriteValue386_Op386SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [31] y))
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
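+// SHRB and SHRW shift values with only 8 and 16 meaningful bits, so a constant
+// count of 8 (resp. 16) or more always produces zero; smaller counts fold into
+// SHRBconst/SHRWconst.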
+func rewriteValue386_Op386SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [31] y))
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int16(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
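+	// There is no subtract-from-constant instruction, so c - x is rewritten as
+	// -(x - c): a SUBLconst wrapped in NEGL.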
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLcarry x (MOVLconst [c]))
+ // result: (SUBLconstcarry [c] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_Op386SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
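+	// The next three rules recognize the rotate idiom: for a 32-bit value,
+	// (x<<c) ^ (x>>(32-c)) is a left rotate by c, so the shift pair becomes a
+	// single ROLLconst. The 16- and 8-bit variants use SHRWconst/SHRBconst and
+	// additionally check the result type's size so the narrower rotate only
+	// applies to 16- and 8-bit values.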
+ // match: (XORL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
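+// From here on the functions lower architecture-independent ops (OpAddr,
+// OpConst*, comparisons, shifts, moves, and so on) to their 386 equivalents.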
+func rewriteValue386_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAL {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
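+// Ctz16 ORs in bit 16 before BSFL: the extra bit sits above all 16 input bits,
+// so the count is unchanged for nonzero x, and it guarantees BSFL sees a set
+// bit when x is zero, yielding exactly 16 as Ctz16 requires.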
+func rewriteValue386_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+ for {
+ x := v_0
+ v.reset(Op386BSFL)
+ v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0x10000)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v_0
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
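+// The floating-point Leq and Less rules below swap their operands: x <= y is
+// lowered as (SETGEF (UCOMISS/UCOMISD y x)) and x < y as (SETGF ...), so only
+// the "greater" condition flavors are needed.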
+func rewriteValue386_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LEAL {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
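+// The Lsh rules implement Go's shift semantics. Unless the count is known to
+// be bounded, the SHLL result is ANDed with an SBBLcarrymask built from a
+// compare of the count against 32: the mask is all ones when the count is
+// below 32 and zero otherwise, so counts the hardware would silently reduce
+// mod 32 instead produce 0. (For the narrow left shifts, counts between the
+// operand width and 31 already leave zero in the low bits, so comparing
+// against 32 is sufficient.) When shiftIsBounded holds, the mask is dropped
+// and a plain SHLL is emitted.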
+func rewriteValue386_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
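+// Move is expanded into the widest loads and stores that cover the size. Odd
+// sizes use mixed-width or overlapping copies (Move [7], for example, issues
+// two 4-byte copies at offsets 0 and 3), and sizes over 8 that are not a
+// multiple of 4 copy the first four bytes and then recurse on a 4-byte-aligned
+// length with both pointers advanced by s%4, overlapping the initial copy.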
+func rewriteValue386_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s%4 != 0
+ // result: (Move [s-s%4] (ADDLconst <dst.Type> dst [int32(s%4)]) (ADDLconst <src.Type> src [int32(s%4)]) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type)
+ v1.AuxInt = int32ToAuxInt(int32(s % 4))
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [10*(128-s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(10 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
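+// Neg32F and Neg64F flip only the sign bit, XORing the value with a -0.0
+// constant (math.Copysign(0, -1)) via PXOR.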
+func rewriteValue386_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr)
+ // result: (ADDLconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
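+// PanicBounds and PanicExtend dispatch on boundsABI(kind): each grouping
+// selects a lowered panic op (A, B, or C) whose runtime entry point expects
+// its arguments in different registers, so no extra moves are needed at the
+// call site.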
+func rewriteValue386_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft16 x (MOVLconst [c]))
+ // result: (ROLWconst [int16(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (MOVLconst [c]))
+ // result: (ROLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft8 x (MOVLconst [c]))
+ // result: (ROLBconst [int8(c&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
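+// Right-shift lowerings follow one pattern. When the count may be out of range
+// (!shiftIsBounded), unsigned shifts AND the hardware shift with
+// SBBLcarrymask(CMPconst y [width]): the mask is all ones for y < width and
+// zero otherwise, so oversized shifts yield 0. Signed shifts instead saturate
+// the count: ORL y (NOTL (SBBLcarrymask ...)) leaves y unchanged when in range
+// and forces it to all ones otherwise, so oversized shifts replicate the sign
+// bit. When shiftIsBounded proves y < width, the plain SHR/SAR is emitted.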
+func rewriteValue386_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(15)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
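+// Mul32uover is lowered through MULLU, whose result is a (product, flags)
+// tuple: Select0 extracts the low 32-bit product and Select1 becomes SETO on
+// the overflow flag.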
+func rewriteValue386_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARLconst (NEGL <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, Op386NEGL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
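+// Store dispatches on the static size of the stored type, using the SSE
+// stores for 4- and 8-byte floats and MOVL/MOVW/MOVB stores for the integer
+// sizes.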
+func rewriteValue386_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
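+// Zero mirrors the Move lowering. makeValAndOff32 packs the stored constant
+// (always 0 here) and the byte offset into a single AuxInt. Sizes up to 16 use
+// chains of MOV*storeconst; misaligned sizes peel the leading s%4 bytes with
+// one 4-byte store and recurse on the aligned remainder; aligned sizes up to
+// 4*128 use DUFFZERO with a zeroed register; larger blocks use REPSTOSL.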
+func rewriteValue386_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2))
+ v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,3)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%4 != 0 && s > 4
+ // result: (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) (MOVLstoreconst [0] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%4 != 0 && s > 4) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,8)] destptr (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,12)] destptr (MOVLstoreconst [makeValAndOff32(0,8)] destptr (MOVLstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 12))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFZERO)
+ v.AuxInt = int64ToAuxInt(1 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0
+ // result: (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) {
+ break
+ }
+ v.reset(Op386REPSTOSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zeromask <t> x)
+ // result: (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v1 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
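+// Block rewrites: an InvertFlags control flips the branch condition; a
+// statically known flags value collapses the branch to BlockFirst, swapping
+// successors when the condition is false; an If whose control is a SETcc (or
+// an NE over TESTB of two identical SETcc values) branches directly on the
+// underlying comparison instead of materializing a boolean. A generic boolean
+// If falls back to (NE (TESTB cond cond)).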
+func rewriteBlock386(b *Block) bool {
+ switch b.Kind {
+ case Block386EQ:
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(Block386NE, v0)
+ return true
+ }
+ case Block386LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGE:
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386ULT:
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go
new file mode 100644
index 0000000..fff26fa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go
@@ -0,0 +1,162 @@
+// Code generated from gen/386splitload.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValue386splitload(v *Value) bool {
+ switch v.Op {
+ case Op386CMPBconstload:
+ return rewriteValue386splitload_Op386CMPBconstload(v)
+ case Op386CMPBload:
+ return rewriteValue386splitload_Op386CMPBload(v)
+ case Op386CMPLconstload:
+ return rewriteValue386splitload_Op386CMPLconstload(v)
+ case Op386CMPLload:
+ return rewriteValue386splitload_Op386CMPLload(v)
+ case Op386CMPWconstload:
+ return rewriteValue386splitload_Op386CMPWconstload(v)
+ case Op386CMPWload:
+ return rewriteValue386splitload_Op386CMPWload(v)
+ }
+ return false
+}
+func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPB)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPW)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlock386splitload(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
new file mode 100644
index 0000000..8272a40
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -0,0 +1,35989 @@
+// Code generated from gen/AMD64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueAMD64(v *Value) bool {
+ switch v.Op {
+ case OpAMD64ADCQ:
+ return rewriteValueAMD64_OpAMD64ADCQ(v)
+ case OpAMD64ADCQconst:
+ return rewriteValueAMD64_OpAMD64ADCQconst(v)
+ case OpAMD64ADDL:
+ return rewriteValueAMD64_OpAMD64ADDL(v)
+ case OpAMD64ADDLconst:
+ return rewriteValueAMD64_OpAMD64ADDLconst(v)
+ case OpAMD64ADDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
+ case OpAMD64ADDLload:
+ return rewriteValueAMD64_OpAMD64ADDLload(v)
+ case OpAMD64ADDLmodify:
+ return rewriteValueAMD64_OpAMD64ADDLmodify(v)
+ case OpAMD64ADDQ:
+ return rewriteValueAMD64_OpAMD64ADDQ(v)
+ case OpAMD64ADDQcarry:
+ return rewriteValueAMD64_OpAMD64ADDQcarry(v)
+ case OpAMD64ADDQconst:
+ return rewriteValueAMD64_OpAMD64ADDQconst(v)
+ case OpAMD64ADDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
+ case OpAMD64ADDQload:
+ return rewriteValueAMD64_OpAMD64ADDQload(v)
+ case OpAMD64ADDQmodify:
+ return rewriteValueAMD64_OpAMD64ADDQmodify(v)
+ case OpAMD64ADDSD:
+ return rewriteValueAMD64_OpAMD64ADDSD(v)
+ case OpAMD64ADDSDload:
+ return rewriteValueAMD64_OpAMD64ADDSDload(v)
+ case OpAMD64ADDSS:
+ return rewriteValueAMD64_OpAMD64ADDSS(v)
+ case OpAMD64ADDSSload:
+ return rewriteValueAMD64_OpAMD64ADDSSload(v)
+ case OpAMD64ANDL:
+ return rewriteValueAMD64_OpAMD64ANDL(v)
+ case OpAMD64ANDLconst:
+ return rewriteValueAMD64_OpAMD64ANDLconst(v)
+ case OpAMD64ANDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
+ case OpAMD64ANDLload:
+ return rewriteValueAMD64_OpAMD64ANDLload(v)
+ case OpAMD64ANDLmodify:
+ return rewriteValueAMD64_OpAMD64ANDLmodify(v)
+ case OpAMD64ANDQ:
+ return rewriteValueAMD64_OpAMD64ANDQ(v)
+ case OpAMD64ANDQconst:
+ return rewriteValueAMD64_OpAMD64ANDQconst(v)
+ case OpAMD64ANDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
+ case OpAMD64ANDQload:
+ return rewriteValueAMD64_OpAMD64ANDQload(v)
+ case OpAMD64ANDQmodify:
+ return rewriteValueAMD64_OpAMD64ANDQmodify(v)
+ case OpAMD64BSFQ:
+ return rewriteValueAMD64_OpAMD64BSFQ(v)
+ case OpAMD64BTCLconst:
+ return rewriteValueAMD64_OpAMD64BTCLconst(v)
+ case OpAMD64BTCLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCLconstmodify(v)
+ case OpAMD64BTCLmodify:
+ return rewriteValueAMD64_OpAMD64BTCLmodify(v)
+ case OpAMD64BTCQconst:
+ return rewriteValueAMD64_OpAMD64BTCQconst(v)
+ case OpAMD64BTCQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCQconstmodify(v)
+ case OpAMD64BTCQmodify:
+ return rewriteValueAMD64_OpAMD64BTCQmodify(v)
+ case OpAMD64BTLconst:
+ return rewriteValueAMD64_OpAMD64BTLconst(v)
+ case OpAMD64BTQconst:
+ return rewriteValueAMD64_OpAMD64BTQconst(v)
+ case OpAMD64BTRLconst:
+ return rewriteValueAMD64_OpAMD64BTRLconst(v)
+ case OpAMD64BTRLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRLconstmodify(v)
+ case OpAMD64BTRLmodify:
+ return rewriteValueAMD64_OpAMD64BTRLmodify(v)
+ case OpAMD64BTRQconst:
+ return rewriteValueAMD64_OpAMD64BTRQconst(v)
+ case OpAMD64BTRQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRQconstmodify(v)
+ case OpAMD64BTRQmodify:
+ return rewriteValueAMD64_OpAMD64BTRQmodify(v)
+ case OpAMD64BTSLconst:
+ return rewriteValueAMD64_OpAMD64BTSLconst(v)
+ case OpAMD64BTSLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSLconstmodify(v)
+ case OpAMD64BTSLmodify:
+ return rewriteValueAMD64_OpAMD64BTSLmodify(v)
+ case OpAMD64BTSQconst:
+ return rewriteValueAMD64_OpAMD64BTSQconst(v)
+ case OpAMD64BTSQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSQconstmodify(v)
+ case OpAMD64BTSQmodify:
+ return rewriteValueAMD64_OpAMD64BTSQmodify(v)
+ case OpAMD64CMOVLCC:
+ return rewriteValueAMD64_OpAMD64CMOVLCC(v)
+ case OpAMD64CMOVLCS:
+ return rewriteValueAMD64_OpAMD64CMOVLCS(v)
+ case OpAMD64CMOVLEQ:
+ return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
+ case OpAMD64CMOVLGE:
+ return rewriteValueAMD64_OpAMD64CMOVLGE(v)
+ case OpAMD64CMOVLGT:
+ return rewriteValueAMD64_OpAMD64CMOVLGT(v)
+ case OpAMD64CMOVLHI:
+ return rewriteValueAMD64_OpAMD64CMOVLHI(v)
+ case OpAMD64CMOVLLE:
+ return rewriteValueAMD64_OpAMD64CMOVLLE(v)
+ case OpAMD64CMOVLLS:
+ return rewriteValueAMD64_OpAMD64CMOVLLS(v)
+ case OpAMD64CMOVLLT:
+ return rewriteValueAMD64_OpAMD64CMOVLLT(v)
+ case OpAMD64CMOVLNE:
+ return rewriteValueAMD64_OpAMD64CMOVLNE(v)
+ case OpAMD64CMOVQCC:
+ return rewriteValueAMD64_OpAMD64CMOVQCC(v)
+ case OpAMD64CMOVQCS:
+ return rewriteValueAMD64_OpAMD64CMOVQCS(v)
+ case OpAMD64CMOVQEQ:
+ return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
+ case OpAMD64CMOVQGE:
+ return rewriteValueAMD64_OpAMD64CMOVQGE(v)
+ case OpAMD64CMOVQGT:
+ return rewriteValueAMD64_OpAMD64CMOVQGT(v)
+ case OpAMD64CMOVQHI:
+ return rewriteValueAMD64_OpAMD64CMOVQHI(v)
+ case OpAMD64CMOVQLE:
+ return rewriteValueAMD64_OpAMD64CMOVQLE(v)
+ case OpAMD64CMOVQLS:
+ return rewriteValueAMD64_OpAMD64CMOVQLS(v)
+ case OpAMD64CMOVQLT:
+ return rewriteValueAMD64_OpAMD64CMOVQLT(v)
+ case OpAMD64CMOVQNE:
+ return rewriteValueAMD64_OpAMD64CMOVQNE(v)
+ case OpAMD64CMOVWCC:
+ return rewriteValueAMD64_OpAMD64CMOVWCC(v)
+ case OpAMD64CMOVWCS:
+ return rewriteValueAMD64_OpAMD64CMOVWCS(v)
+ case OpAMD64CMOVWEQ:
+ return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
+ case OpAMD64CMOVWGE:
+ return rewriteValueAMD64_OpAMD64CMOVWGE(v)
+ case OpAMD64CMOVWGT:
+ return rewriteValueAMD64_OpAMD64CMOVWGT(v)
+ case OpAMD64CMOVWHI:
+ return rewriteValueAMD64_OpAMD64CMOVWHI(v)
+ case OpAMD64CMOVWLE:
+ return rewriteValueAMD64_OpAMD64CMOVWLE(v)
+ case OpAMD64CMOVWLS:
+ return rewriteValueAMD64_OpAMD64CMOVWLS(v)
+ case OpAMD64CMOVWLT:
+ return rewriteValueAMD64_OpAMD64CMOVWLT(v)
+ case OpAMD64CMOVWNE:
+ return rewriteValueAMD64_OpAMD64CMOVWNE(v)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v)
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64_OpAMD64CMPBload(v)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64_OpAMD64CMPLload(v)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64_OpAMD64CMPQload(v)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64_OpAMD64CMPWload(v)
+ case OpAMD64CMPXCHGLlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
+ case OpAMD64CMPXCHGQlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
+ case OpAMD64DIVSD:
+ return rewriteValueAMD64_OpAMD64DIVSD(v)
+ case OpAMD64DIVSDload:
+ return rewriteValueAMD64_OpAMD64DIVSDload(v)
+ case OpAMD64DIVSS:
+ return rewriteValueAMD64_OpAMD64DIVSS(v)
+ case OpAMD64DIVSSload:
+ return rewriteValueAMD64_OpAMD64DIVSSload(v)
+ case OpAMD64HMULL:
+ return rewriteValueAMD64_OpAMD64HMULL(v)
+ case OpAMD64HMULLU:
+ return rewriteValueAMD64_OpAMD64HMULLU(v)
+ case OpAMD64HMULQ:
+ return rewriteValueAMD64_OpAMD64HMULQ(v)
+ case OpAMD64HMULQU:
+ return rewriteValueAMD64_OpAMD64HMULQU(v)
+ case OpAMD64LEAL:
+ return rewriteValueAMD64_OpAMD64LEAL(v)
+ case OpAMD64LEAL1:
+ return rewriteValueAMD64_OpAMD64LEAL1(v)
+ case OpAMD64LEAL2:
+ return rewriteValueAMD64_OpAMD64LEAL2(v)
+ case OpAMD64LEAL4:
+ return rewriteValueAMD64_OpAMD64LEAL4(v)
+ case OpAMD64LEAL8:
+ return rewriteValueAMD64_OpAMD64LEAL8(v)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v)
+ case OpAMD64MOVBatomicload:
+ return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v)
+ case OpAMD64MOVLatomicload:
+ return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
+ case OpAMD64MOVLf2i:
+ return rewriteValueAMD64_OpAMD64MOVLf2i(v)
+ case OpAMD64MOVLi2f:
+ return rewriteValueAMD64_OpAMD64MOVLi2f(v)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v)
+ case OpAMD64MOVQatomicload:
+ return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
+ case OpAMD64MOVQf2i:
+ return rewriteValueAMD64_OpAMD64MOVQf2i(v)
+ case OpAMD64MOVQi2f:
+ return rewriteValueAMD64_OpAMD64MOVQi2f(v)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v)
+ case OpAMD64MULSD:
+ return rewriteValueAMD64_OpAMD64MULSD(v)
+ case OpAMD64MULSDload:
+ return rewriteValueAMD64_OpAMD64MULSDload(v)
+ case OpAMD64MULSS:
+ return rewriteValueAMD64_OpAMD64MULSS(v)
+ case OpAMD64MULSSload:
+ return rewriteValueAMD64_OpAMD64MULSSload(v)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v)
+ case OpAMD64ORLconstmodify:
+ return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
+ case OpAMD64ORLload:
+ return rewriteValueAMD64_OpAMD64ORLload(v)
+ case OpAMD64ORLmodify:
+ return rewriteValueAMD64_OpAMD64ORLmodify(v)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v)
+ case OpAMD64ORQconstmodify:
+ return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
+ case OpAMD64ORQload:
+ return rewriteValueAMD64_OpAMD64ORQload(v)
+ case OpAMD64ORQmodify:
+ return rewriteValueAMD64_OpAMD64ORQmodify(v)
+ case OpAMD64ROLB:
+ return rewriteValueAMD64_OpAMD64ROLB(v)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v)
+ case OpAMD64ROLL:
+ return rewriteValueAMD64_OpAMD64ROLL(v)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v)
+ case OpAMD64ROLQ:
+ return rewriteValueAMD64_OpAMD64ROLQ(v)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v)
+ case OpAMD64ROLW:
+ return rewriteValueAMD64_OpAMD64ROLW(v)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v)
+ case OpAMD64RORB:
+ return rewriteValueAMD64_OpAMD64RORB(v)
+ case OpAMD64RORL:
+ return rewriteValueAMD64_OpAMD64RORL(v)
+ case OpAMD64RORQ:
+ return rewriteValueAMD64_OpAMD64RORQ(v)
+ case OpAMD64RORW:
+ return rewriteValueAMD64_OpAMD64RORW(v)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
+ case OpAMD64SBBQ:
+ return rewriteValueAMD64_OpAMD64SBBQ(v)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
+ case OpAMD64SBBQconst:
+ return rewriteValueAMD64_OpAMD64SBBQconst(v)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v)
+ case OpAMD64SETAEstore:
+ return rewriteValueAMD64_OpAMD64SETAEstore(v)
+ case OpAMD64SETAstore:
+ return rewriteValueAMD64_OpAMD64SETAstore(v)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v)
+ case OpAMD64SETBEstore:
+ return rewriteValueAMD64_OpAMD64SETBEstore(v)
+ case OpAMD64SETBstore:
+ return rewriteValueAMD64_OpAMD64SETBstore(v)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v)
+ case OpAMD64SETEQstore:
+ return rewriteValueAMD64_OpAMD64SETEQstore(v)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v)
+ case OpAMD64SETGEstore:
+ return rewriteValueAMD64_OpAMD64SETGEstore(v)
+ case OpAMD64SETGstore:
+ return rewriteValueAMD64_OpAMD64SETGstore(v)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v)
+ case OpAMD64SETLEstore:
+ return rewriteValueAMD64_OpAMD64SETLEstore(v)
+ case OpAMD64SETLstore:
+ return rewriteValueAMD64_OpAMD64SETLstore(v)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v)
+ case OpAMD64SETNEstore:
+ return rewriteValueAMD64_OpAMD64SETNEstore(v)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v)
+ case OpAMD64SHLLconst:
+ return rewriteValueAMD64_OpAMD64SHLLconst(v)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v)
+ case OpAMD64SHLQconst:
+ return rewriteValueAMD64_OpAMD64SHLQconst(v)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v)
+ case OpAMD64SHRBconst:
+ return rewriteValueAMD64_OpAMD64SHRBconst(v)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v)
+ case OpAMD64SHRLconst:
+ return rewriteValueAMD64_OpAMD64SHRLconst(v)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v)
+ case OpAMD64SHRQconst:
+ return rewriteValueAMD64_OpAMD64SHRQconst(v)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v)
+ case OpAMD64SHRWconst:
+ return rewriteValueAMD64_OpAMD64SHRWconst(v)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v)
+ case OpAMD64SUBLload:
+ return rewriteValueAMD64_OpAMD64SUBLload(v)
+ case OpAMD64SUBLmodify:
+ return rewriteValueAMD64_OpAMD64SUBLmodify(v)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v)
+ case OpAMD64SUBQborrow:
+ return rewriteValueAMD64_OpAMD64SUBQborrow(v)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v)
+ case OpAMD64SUBQload:
+ return rewriteValueAMD64_OpAMD64SUBQload(v)
+ case OpAMD64SUBQmodify:
+ return rewriteValueAMD64_OpAMD64SUBQmodify(v)
+ case OpAMD64SUBSD:
+ return rewriteValueAMD64_OpAMD64SUBSD(v)
+ case OpAMD64SUBSDload:
+ return rewriteValueAMD64_OpAMD64SUBSDload(v)
+ case OpAMD64SUBSS:
+ return rewriteValueAMD64_OpAMD64SUBSS(v)
+ case OpAMD64SUBSSload:
+ return rewriteValueAMD64_OpAMD64SUBSSload(v)
+ case OpAMD64TESTB:
+ return rewriteValueAMD64_OpAMD64TESTB(v)
+ case OpAMD64TESTBconst:
+ return rewriteValueAMD64_OpAMD64TESTBconst(v)
+ case OpAMD64TESTL:
+ return rewriteValueAMD64_OpAMD64TESTL(v)
+ case OpAMD64TESTLconst:
+ return rewriteValueAMD64_OpAMD64TESTLconst(v)
+ case OpAMD64TESTQ:
+ return rewriteValueAMD64_OpAMD64TESTQ(v)
+ case OpAMD64TESTQconst:
+ return rewriteValueAMD64_OpAMD64TESTQconst(v)
+ case OpAMD64TESTW:
+ return rewriteValueAMD64_OpAMD64TESTW(v)
+ case OpAMD64TESTWconst:
+ return rewriteValueAMD64_OpAMD64TESTWconst(v)
+ case OpAMD64XADDLlock:
+ return rewriteValueAMD64_OpAMD64XADDLlock(v)
+ case OpAMD64XADDQlock:
+ return rewriteValueAMD64_OpAMD64XADDQlock(v)
+ case OpAMD64XCHGL:
+ return rewriteValueAMD64_OpAMD64XCHGL(v)
+ case OpAMD64XCHGQ:
+ return rewriteValueAMD64_OpAMD64XCHGQ(v)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v)
+ case OpAMD64XORLconstmodify:
+ return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
+ case OpAMD64XORLload:
+ return rewriteValueAMD64_OpAMD64XORLload(v)
+ case OpAMD64XORLmodify:
+ return rewriteValueAMD64_OpAMD64XORLmodify(v)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v)
+ case OpAMD64XORQconstmodify:
+ return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
+ case OpAMD64XORQload:
+ return rewriteValueAMD64_OpAMD64XORQload(v)
+ case OpAMD64XORQmodify:
+ return rewriteValueAMD64_OpAMD64XORQmodify(v)
+ case OpAdd16:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32F:
+ v.Op = OpAMD64ADDSS
+ return true
+ case OpAdd64:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAdd64F:
+ v.Op = OpAMD64ADDSD
+ return true
+ case OpAdd8:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAddPtr:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAddr:
+ return rewriteValueAMD64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd32:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd64:
+ v.Op = OpAMD64ANDQ
+ return true
+ case OpAnd8:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAndB:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueAMD64_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueAMD64_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ return rewriteValueAMD64_OpAtomicAnd32(v)
+ case OpAtomicAnd8:
+ return rewriteValueAMD64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueAMD64_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueAMD64_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueAMD64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueAMD64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueAMD64_OpAtomicLoad8(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueAMD64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ return rewriteValueAMD64_OpAtomicOr32(v)
+ case OpAtomicOr8:
+ return rewriteValueAMD64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueAMD64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueAMD64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueAMD64_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
+ case OpAvg64u:
+ v.Op = OpAMD64AVGQU
+ return true
+ case OpBitLen16:
+ return rewriteValueAMD64_OpBitLen16(v)
+ case OpBitLen32:
+ return rewriteValueAMD64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueAMD64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueAMD64_OpBitLen8(v)
+ case OpBswap32:
+ v.Op = OpAMD64BSWAPL
+ return true
+ case OpBswap64:
+ v.Op = OpAMD64BSWAPQ
+ return true
+ case OpCeil:
+ return rewriteValueAMD64_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpAMD64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom32:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom64:
+ v.Op = OpAMD64NOTQ
+ return true
+ case OpCom8:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCondSelect:
+ return rewriteValueAMD64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueAMD64_OpConst16(v)
+ case OpConst32:
+ v.Op = OpAMD64MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = OpAMD64MOVSSconst
+ return true
+ case OpConst64:
+ v.Op = OpAMD64MOVQconst
+ return true
+ case OpConst64F:
+ v.Op = OpAMD64MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValueAMD64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueAMD64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueAMD64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueAMD64_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpAMD64BSFL
+ return true
+ case OpCtz32:
+ return rewriteValueAMD64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpAMD64BSFL
+ return true
+ case OpCtz64:
+ return rewriteValueAMD64_OpCtz64(v)
+ case OpCtz64NonZero:
+ return rewriteValueAMD64_OpCtz64NonZero(v)
+ case OpCtz8:
+ return rewriteValueAMD64_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpAMD64BSFL
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpAMD64CVTTSS2SL
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpAMD64CVTTSS2SQ
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpAMD64CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpAMD64CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpAMD64CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpAMD64CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpAMD64CVTSD2SS
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpAMD64CVTTSD2SQ
+ return true
+ case OpCvt64to32F:
+ v.Op = OpAMD64CVTSQ2SS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpAMD64CVTSQ2SD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv128u:
+ v.Op = OpAMD64DIVQU2
+ return true
+ case OpDiv16:
+ return rewriteValueAMD64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueAMD64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueAMD64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpAMD64DIVSS
+ return true
+ case OpDiv32u:
+ return rewriteValueAMD64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueAMD64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpAMD64DIVSD
+ return true
+ case OpDiv64u:
+ return rewriteValueAMD64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueAMD64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueAMD64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueAMD64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueAMD64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueAMD64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueAMD64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueAMD64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueAMD64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueAMD64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueAMD64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueAMD64_OpFMA(v)
+ case OpFloor:
+ return rewriteValueAMD64_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpAMD64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpAMD64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpAMD64LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = OpAMD64LoweredGetG
+ return true
+ case OpHasCPUFeature:
+ return rewriteValueAMD64_OpHasCPUFeature(v)
+ case OpHmul32:
+ v.Op = OpAMD64HMULL
+ return true
+ case OpHmul32u:
+ v.Op = OpAMD64HMULLU
+ return true
+ case OpHmul64:
+ v.Op = OpAMD64HMULQ
+ return true
+ case OpHmul64u:
+ v.Op = OpAMD64HMULQU
+ return true
+ case OpInterCall:
+ v.Op = OpAMD64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueAMD64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueAMD64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueAMD64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueAMD64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueAMD64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueAMD64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueAMD64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueAMD64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueAMD64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueAMD64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueAMD64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueAMD64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueAMD64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueAMD64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueAMD64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueAMD64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueAMD64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueAMD64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueAMD64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueAMD64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueAMD64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueAMD64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueAMD64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueAMD64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueAMD64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueAMD64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueAMD64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueAMD64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueAMD64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueAMD64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueAMD64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueAMD64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueAMD64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueAMD64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueAMD64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueAMD64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueAMD64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueAMD64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueAMD64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueAMD64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueAMD64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueAMD64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueAMD64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueAMD64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueAMD64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueAMD64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueAMD64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueAMD64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueAMD64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueAMD64_OpMove(v)
+ case OpMul16:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32F:
+ v.Op = OpAMD64MULSS
+ return true
+ case OpMul64:
+ v.Op = OpAMD64MULQ
+ return true
+ case OpMul64F:
+ v.Op = OpAMD64MULSD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpAMD64MULQU2
+ return true
+ case OpMul8:
+ v.Op = OpAMD64MULL
+ return true
+ case OpNeg16:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValueAMD64_OpNeg32F(v)
+ case OpNeg64:
+ v.Op = OpAMD64NEGQ
+ return true
+ case OpNeg64F:
+ return rewriteValueAMD64_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeq16:
+ return rewriteValueAMD64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueAMD64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueAMD64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueAMD64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueAMD64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueAMD64_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueAMD64_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueAMD64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpAMD64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueAMD64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueAMD64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr32:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr64:
+ v.Op = OpAMD64ORQ
+ return true
+ case OpOr8:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOrB:
+ v.Op = OpAMD64ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValueAMD64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueAMD64_OpPopCount16(v)
+ case OpPopCount32:
+ v.Op = OpAMD64POPCNTL
+ return true
+ case OpPopCount64:
+ v.Op = OpAMD64POPCNTQ
+ return true
+ case OpPopCount8:
+ return rewriteValueAMD64_OpPopCount8(v)
+ case OpRotateLeft16:
+ v.Op = OpAMD64ROLW
+ return true
+ case OpRotateLeft32:
+ v.Op = OpAMD64ROLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpAMD64ROLQ
+ return true
+ case OpRotateLeft8:
+ v.Op = OpAMD64ROLB
+ return true
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ return rewriteValueAMD64_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueAMD64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueAMD64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueAMD64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueAMD64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueAMD64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueAMD64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueAMD64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueAMD64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueAMD64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueAMD64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueAMD64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueAMD64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueAMD64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueAMD64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueAMD64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueAMD64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueAMD64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueAMD64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueAMD64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueAMD64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueAMD64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueAMD64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueAMD64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueAMD64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueAMD64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueAMD64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueAMD64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueAMD64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueAMD64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueAMD64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueAMD64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueAMD64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueAMD64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueAMD64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt16to64:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt32to64:
+ v.Op = OpAMD64MOVLQSX
+ return true
+ case OpSignExt8to16:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to32:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to64:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSlicemask:
+ return rewriteValueAMD64_OpSlicemask(v)
+ case OpSpectreIndex:
+ return rewriteValueAMD64_OpSpectreIndex(v)
+ case OpSpectreSliceIndex:
+ return rewriteValueAMD64_OpSpectreSliceIndex(v)
+ case OpSqrt:
+ v.Op = OpAMD64SQRTSD
+ return true
+ case OpStaticCall:
+ v.Op = OpAMD64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueAMD64_OpStore(v)
+ case OpSub16:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32F:
+ v.Op = OpAMD64SUBSS
+ return true
+ case OpSub64:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpSub64F:
+ v.Op = OpAMD64SUBSD
+ return true
+ case OpSub8:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSubPtr:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpTrunc:
+ return rewriteValueAMD64_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpAMD64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor32:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor64:
+ v.Op = OpAMD64XORQ
+ return true
+ case OpXor8:
+ v.Op = OpAMD64XORL
+ return true
+ case OpZero:
+ return rewriteValueAMD64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpAMD64MOVLQZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQ x (MOVQconst [c]) carry)
+ // cond: is32Bit(c)
+ // result: (ADCQconst x [int32(c)] carry)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ carry := v_2
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADCQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, carry)
+ return true
+ }
+ break
+ }
+ // match: (ADCQ x y (FlagEQ))
+ // result: (ADDQcarry x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQcarry)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQconst x [c] (FlagEQ))
+ // result: (ADDQconstcarry x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst [c]))
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (SHLLconst [1] x))
+ // result: (LEAL1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [off] x:(SP))
+ // result: (LEAL [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ADDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (MOVLconst [c]))
+ // result: (ADDQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [3] y))
+ // result: (LEAQ8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [2] y))
+ // result: (LEAQ4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [1] y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ y y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ x y))
+ // result: (LEAQ2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDQ (ADDQconst [c] x) y)
+ // result: (LEAQ1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (LEAQ [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (NEGQ y))
+ // result: (SUBQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQcarry x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconstcarry x [int32(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDQconst [c] (ADDQ x y))
+ // result: (LEAQ1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (SHLQconst [1] x))
+ // result: (LEAQ1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDQconst [c] (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDQconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [off] x:(SP))
+ // result: (LEAQ [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ADDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (ADDSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (ADDSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
+ // result: (BTRL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTL {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRLconst [int8(log32(^c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRLconst [int8(log32(^c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] (BTRLconst [d] x))
+ // result: (ANDLconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ANDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem)
+ // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64NOTL {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
+ // result: (BTRQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTQ {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+ // result: (BTRQconst [int8(log64(^c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(^c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRQconst [int8(log32(^c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [c] (ANDQconst [d] x))
+ // result: (ANDQconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [c] (BTRQconst [d] x))
+ // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
+ // result: (ANDQconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDQconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ANDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem)
+ // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64NOTQ {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<8] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<16] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTCLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (BTCLconst [d] x))
+ // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTCLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTCQconst [c] (XORQconst [d] x))
+ // cond: is32Bit(int64(d) ^ 1<<uint32(c))
+ // result: (XORQconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (BTCQconst [d] x))
+ // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
+ // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTCQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTLconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (BTLconst [c] (SHRLconst [d] x))
+ // cond: (c+d)<32
+ // result: (BTLconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 32) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLLconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTQconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTQconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTRLconst [c] (BTSLconst [c] x))
+ // result: (BTRLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTCLconst [c] x))
+ // result: (BTRLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTRLconst [d] x))
+ // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d&^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTRLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTRQconst [c] (BTSQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTCQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (ANDQconst [d] x))
+ // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
+ // result: (ANDQconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTRQconst [d] x))
+ // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
+ // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d&^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTRQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTSLconst [c] (BTRLconst [c] x))
+ // result: (BTSLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTCLconst [c] x))
+ // result: (BTSLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (ORLconst [d] x))
+ // result: (ORLconst [d | 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTSLconst [d] x))
+ // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d|(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTSLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTSQconst [c] (BTRQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTCQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (ORQconst [d] x))
+ // cond: is32Bit(int64(d) | 1<<uint32(c))
+ // result: (ORQconst [d | 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) | 1<<uint32(c))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTSQconst [d] x))
+ // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
+ // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d|(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (BTSQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCC x y (InvertFlags cond))
+ // result: (CMOVLLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCS x y (InvertFlags cond))
+ // result: (CMOVLHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLEQ x y (InvertFlags cond))
+ // result: (CMOVLEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGE x y (InvertFlags cond))
+ // result: (CMOVLLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGT x y (InvertFlags cond))
+ // result: (CMOVLLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLHI x y (InvertFlags cond))
+ // result: (CMOVLCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLE x y (InvertFlags cond))
+ // result: (CMOVLGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLS x y (InvertFlags cond))
+ // result: (CMOVLCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLT x y (InvertFlags cond))
+ // result: (CMOVLGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLNE x y (InvertFlags cond))
+ // result: (CMOVLNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCC x y (InvertFlags cond))
+ // result: (CMOVQLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCS x y (InvertFlags cond))
+ // result: (CMOVQHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQEQ x y (InvertFlags cond))
+ // result: (CMOVQEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
+ // cond: c != 0
+ // result: x
+ for {
+ x := v_0
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpAMD64BSFQ {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGE x y (InvertFlags cond))
+ // result: (CMOVQLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGT x y (InvertFlags cond))
+ // result: (CMOVQLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQHI x y (InvertFlags cond))
+ // result: (CMOVQCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLE x y (InvertFlags cond))
+ // result: (CMOVQGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLS x y (InvertFlags cond))
+ // result: (CMOVQCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLT x y (InvertFlags cond))
+ // result: (CMOVQGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQNE x y (InvertFlags cond))
+ // result: (CMOVQNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCC x y (InvertFlags cond))
+ // result: (CMOVWLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCS x y (InvertFlags cond))
+ // result: (CMOVWHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWEQ x y (InvertFlags cond))
+ // result: (CMOVWEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGE x y (InvertFlags cond))
+ // result: (CMOVWLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGT x y (InvertFlags cond))
+ // result: (CMOVWLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHI x y (InvertFlags cond))
+ // result: (CMOVWCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLE x y (InvertFlags cond))
+ // result: (CMOVWGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLS x y (InvertFlags cond))
+ // result: (CMOVWCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLT x y (InvertFlags cond))
+ // result: (CMOVWGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWNE x y (InvertFlags cond))
+ // result: (CMOVWNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPBload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int8(c)),int64(off))
+ // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(int8(c)), int64(off))) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPLload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(c),int64(off))
+ // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(c), int64(off))) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPQconst x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPQ y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPQload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
+ // result: (FlagLT_ULT)
+ for {
+ if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
+ // result: (FlagLT_ULT)
+ for {
+ if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVBQZX _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVWQZX _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (SHRQconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDQconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQ x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQ {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQconst x [0])
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPQload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
+ // cond: validValAndOff(c,int64(off))
+ // result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(c, int64(off))) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(c, int64(off)))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPWload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int16(c)),int64(off))
+ // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(validValAndOff(int64(int16(c)), int64(off))) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULL x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULL y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULLU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULLU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULLU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQ x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQ y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAQ [c] {s} (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [c] {s} (ADDQ x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64LEAQ {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAQ1 [0] x y)
+ // cond: v.Aux == nil
+ // result: (ADDQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if !(v.Aux == nil) {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
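+// rewriteValueAMD64_OpAMD64LEAQ2 simplifies LEAQ2, which computes roughly
+// arg0 + 2*arg1 + auxint + aux: constant offsets are folded into the
+// displacement (note the 2*d when the constant comes from the scaled index),
+// mergeable symbols are combined, and the scale is widened to LEAQ4/LEAQ8
+// when the index operand is itself a left shift by a small constant.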
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAQ2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
+ // result: (LEAQ4 [off1+2*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAQ4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
+ // result: (LEAQ8 [off1+4*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAQ8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
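+// rewriteValueAMD64_OpAMD64MOVBQSX simplifies MOVBQSX (sign-extend the low
+// byte to 64 bits): a single-use load feeding the extension is replaced by a
+// sign-extending load issued in the load's block, an ANDLconst mask that
+// already clears bit 7 makes the extension redundant (only the narrowed mask
+// remains), and nested MOVBQSX operations collapse to one.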
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
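+// rewriteValueAMD64_OpAMD64MOVBQSXload handles the sign-extending byte load:
+// a load that reads back a byte just stored to the same address forwards the
+// stored value through MOVBQSX, and LEAQ address arithmetic is folded into
+// the load's offset and symbol.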
+func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
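+// rewriteValueAMD64_OpAMD64MOVBQZX mirrors the MOVBQSX rules for the
+// zero-extending case, and additionally drops the extension entirely when
+// zeroUpper56Bits reports that the upper 56 bits of the operand are already
+// known to be zero.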
+func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x)
+ // cond: zeroUpper56Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper56Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
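+// rewriteValueAMD64_OpAMD64MOVBatomicload only folds ADDQconst and LEAQ
+// address arithmetic into the atomic load's offset and symbol; the load
+// itself is never removed.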
+func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
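+// rewriteValueAMD64_OpAMD64MOVBload forwards a byte loaded from an address
+// that was just stored to (becoming MOVBQZX of the stored value), folds
+// ADDQconst/ADDLconst/LEAQ/LEAL arithmetic into the offset and symbol, and
+// constant-folds loads from read-only symbols via read8.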
+func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
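+// rewriteValueAMD64_OpAMD64MOVBstore simplifies byte stores: a stored SETcc
+// result is fused into the corresponding SETccstore, redundant extensions of
+// the stored value are dropped, constant values become MOVBstoreconst,
+// address arithmetic is folded into the offset and symbol, and adjacent byte
+// stores of shifted pieces of one value are combined into wider MOVWstore/
+// MOVLstore/MOVQstore operations (inserting ROLWconst/BSWAPL/BSWAPQ when the
+// bytes are written in big-endian order).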
+func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETL {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETLE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETG {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETGE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
+ // cond: y.Uses == 1
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETEQ {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETNE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETB {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETBE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETA {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETAE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
+ // cond: x0.Uses == 1 && clobber(x0)
+ // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
+ // cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
+ // result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x2 := v_2
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
+ // result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p3 := v_0
+ w := v_1
+ x2 := v_2
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ p2 := x2.Args[0]
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x6 := v_2
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
+ break
+ }
+ _ = x6.Args[2]
+ if p != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
+ break
+ }
+ x5 := x6.Args[2]
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if p != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
+ break
+ }
+ x4 := x5.Args[2]
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
+ break
+ }
+ x3 := x4.Args[2]
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x2 := x3.Args[2]
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p7 := v_0
+ w := v_1
+ x6 := v_2
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
+ break
+ }
+ _ = x6.Args[2]
+ p6 := x6.Args[0]
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
+ break
+ }
+ x5 := x6.Args[2]
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ p5 := x5.Args[0]
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
+ break
+ }
+ x4 := x5.Args[2]
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ p4 := x4.Args[0]
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
+ break
+ }
+ x3 := x4.Args[2]
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ p3 := x3.Args[0]
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x2 := x3.Args[2]
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ p2 := x2.Args[0]
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVBload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(j - 1)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
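+// rewriteValueAMD64_OpAMD64MOVBstoreconst folds address arithmetic into the
+// ValAndOff carried in AuxInt and pairs two adjacent constant byte stores
+// into a single MOVWstoreconst.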
+func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX (ANDLconst [c] x))
+ // cond: uint32(c) & 0x80000000 == 0
+ // result: (ANDLconst [c & 0x7fffffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint32(c)&0x80000000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVLQSX x))
+ // result: (MOVLQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x)
+ // cond: zeroUpper32Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVLQZX x))
+ // result: (MOVLQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
+ // result: (MOVLf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVQstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVQstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
+ // result: (MOVQstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
+ // result: (MOVQstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVLload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(j - 4)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTCL {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTRL {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTSL {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ADDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ANDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (XORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTCLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTCLconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTRLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTRLconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTSLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTSLconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
+ // result: (MOVSSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVLstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVLstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
+ // cond: symIsRO(srcSym)
+ // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
+ for {
+ dstOff := auxIntToInt32(v.AuxInt)
+ dstSym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVOload {
+ break
+ }
+ srcOff := auxIntToInt32(v_1.AuxInt)
+ srcSym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsRO(srcSym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(dstOff + 8)
+ v.Aux = symToAux(dstSym)
+ v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
+ v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(dstOff)
+ v1.Aux = symToAux(dstSym)
+ v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
+ v1.AddArg3(ptr, v2, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
+ // result: (MOVQf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVQload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validVal(c)
+ // result: (MOVQstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validVal(c)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBQ {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTCQ {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTRQ {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64BTSQ {
+ break
+ }
+ t := y.Type
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ADDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ANDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (ORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (XORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTCQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTCQconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTRQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTRQconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a)
+ // result: (BTSQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64BTSQconst {
+ break
+ }
+ c := auxIntToInt8(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
+ // result: (MOVSDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
+ // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)
+ // result: (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ c2 := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(c2.Off32())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v0.AuxInt = int128ToAuxInt(0)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVQstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
+ // result: (MOVQi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
+ // result: (MOVQstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
+ // result: (MOVLi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
+ // result: (MOVLstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x)
+ // cond: zeroUpper48Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper48Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVWload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(j - 2)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [ 0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [ 3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [ 9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
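+// rewriteValueAMD64_OpAMD64MULQ folds a constant multiplier that fits in 32
+// bits into the MULQconst form, so later rules can strength-reduce it.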
+func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
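+// rewriteValueAMD64_OpAMD64MULQconst strength-reduces multiplication by a
+// constant into cheaper NEGQ/LEAQ/SHLQconst/SUBQ sequences and folds constant
+// operands. For example, x*5 becomes LEAQ4 x x (x + 4*x) instead of an IMULQ.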
+func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULQconst [c] (MULQconst [d] x))
+ // cond: is32Bit(int64(c)*int64(d))
+ // result: (MULQconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) * int64(d))) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [-9] x)
+ // result: (NEGQ (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-5] x)
+ // result: (NEGQ (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-3] x)
+ // result: (NEGQ (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-1] x)
+ // result: (NEGQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [ 0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MULQconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULQconst [ 3] x)
+ // result: (LEAQ2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 5] x)
+ // result: (LEAQ4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 7] x)
+ // result: (LEAQ2 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [ 9] x)
+ // result: (LEAQ8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [11] x)
+ // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [13] x)
+ // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [19] x)
+ // result: (LEAQ2 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [21] x)
+ // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [25] x)
+ // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [27] x)
+ // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [37] x)
+ // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [41] x)
+ // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [45] x)
+ // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [73] x)
+ // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [81] x)
+ // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ // match: (MULQconst [c] (NEGQ x))
+ // cond: c != -(1<<31)
+ // result: (MULQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
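+// rewriteValueAMD64_OpAMD64MULSD merges a single-use MOVSDload operand into
+// the multiply, producing a MULSDload with a memory operand.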
+func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
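+// rewriteValueAMD64_OpAMD64MULSDload folds ADDQconst and LEAQ address
+// calculations into the load's offset and symbol, and forwards a value that
+// was just stored to the same address through MOVQi2f, avoiding the memory
+// round trip.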
+func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (MULSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
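+// rewriteValueAMD64_OpAMD64MULSS is the float32 analogue of MULSD: a
+// single-use MOVSSload operand is merged into a MULSSload.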
+func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
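+// rewriteValueAMD64_OpAMD64MULSSload folds address arithmetic into the load
+// and forwards a just-stored 32-bit value through MOVLi2f.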
+func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (MULSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
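+// rewriteValueAMD64_OpAMD64NEGL cancels double negation, turns the negation
+// of a single-use SUBL into a SUBL with swapped operands, and folds constants.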
+func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (NEGL x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGL {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGL s:(SUBL x y))
+ // cond: s.Uses == 1
+ // result: (SUBL y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
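+// rewriteValueAMD64_OpAMD64NEGQ mirrors NEGL for 64-bit values and also
+// folds NEGQ of an ADDQconst applied to a negated value into a plain
+// ADDQconst with the negated constant.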
+func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGQ (NEGQ x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGQ s:(SUBQ x y))
+ // cond: s.Uses == 1
+ // result: (SUBQ y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGQ (MOVQconst [c]))
+ // result: (MOVQconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEGQ (ADDQconst [c] (NEGQ x)))
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
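+// rewriteValueAMD64_OpAMD64NOTL folds the bitwise complement of a constant.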
+func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
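+// rewriteValueAMD64_OpAMD64NOTQ folds the bitwise complement of a 64-bit constant.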
+func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTQ (MOVQconst [c]))
+ // result: (MOVQconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ return false
+}
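+// rewriteValueAMD64_OpAMD64ORL recognizes bit-set (BTSL/BTSLconst) and rotate
+// (ROLL/RORL/ROLW/RORW/ROLB/RORB and their const forms) idioms, combines
+// adjacent byte and word loads into wider loads (using BSWAPL or ROLWconst
+// for the byte-swapped order), and merges a single-use MOVLload into ORLload.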
+func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTSL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSLconst [int8(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
+ // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSLconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (ORLconst [d] x))
+ // result: (ORLconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (BTSLconst [d] x))
+ // result: (ORLconst [c | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: ( ORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
+ // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // result: (BTSQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVLconst [c]))
+ // result: (ORQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
+ // result: (MOVQconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVBload [i] {s} p0 mem) sh:(SHLQconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVWload [i] {s} p0 mem) sh:(SHLQconst [16] x1:(MOVWload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x1:(MOVBload [i] {s} p1 mem) sh:(SHLQconst [8] x0:(MOVBload [i] {s} p0 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
+ // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
+ // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) y))
+ // cond: j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSQconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (ORQconst [d] x))
+ // result: (ORQconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (BTSQconst [d] x))
+ // cond: is32Bit(int64(c) | 1<<uint32(d))
+ // result: (ORQconst [c | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) | 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQconst [-1] _)
+ // result: (MOVQconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: ( ORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
+ // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLB x (NEGQ y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (NEGL y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (MOVQconst [c]))
+	// result: (ROLBconst [int8(c&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLB x (MOVLconst [c]))
+	// result: (ROLBconst [int8(c&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+	// result: (ROLBconst [(c+d)&7] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLBconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLL x (NEGQ y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (NEGL y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (MOVQconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLL x (MOVLconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLQ x (NEGQ y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (NEGL y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLQconst [c] (ROLQconst [d] x))
+ // result: (ROLQconst [(c+d)&63] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLW x (NEGQ y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (NEGL y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (MOVQconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLW x (MOVLconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLWconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 15)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORB x (NEGQ y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (NEGL y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (MOVQconst [c]))
+	// result: (ROLBconst [int8((-c)&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORB x (MOVLconst [c]))
+	// result: (ROLBconst [int8((-c)&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORL x (NEGQ y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (NEGL y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (MOVQconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORL x (MOVLconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORQ x (NEGQ y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (NEGL y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (NEGQ y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (NEGL y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (MOVQconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORW x (MOVLconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVQconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int8(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARL x (MOVQconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARQ x (MOVQconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (MOVLconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVQconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int16(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQ x (MOVQconst [c]) borrow)
+ // cond: is32Bit(c)
+ // result: (SBBQconst x [int32(c)] borrow)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ borrow := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SBBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, borrow)
+ return true
+ }
+ // match: (SBBQ x y (FlagEQ))
+ // result: (SUBQborrow x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQborrow)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBQcarrymask (FlagEQ))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQconst x [c] (FlagEQ))
+ // result: (SUBQconstborrow x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (TESTQ x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTL x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTW x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTB x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (TESTQ x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTL x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTW x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTB x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (BTLconst [0] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (BTQconst [0] x))
+ // result: (ANDQconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETAE (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETAE (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAE (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAE (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAE (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETNE (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETNE (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
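+// In short: the SETEQ rules above exploit the BT family, which copies the
+// selected bit into CF, so testing a single-bit mask for zero can be done with
+// BT followed by SETAE (CF == 0, i.e. bit clear). Illustrative shapes, with x
+// and y standing for arbitrary operands:
+//
+//	y & (1 << x) == 0          =>  BTQ x, y;        SETAE
+//	x >> 63 == 0 (sign clear)  =>  BTQconst 63, x;  SETAE
+//
+// The CMP*const [1] (AND*const [1] _) rules use the fact that a value masked
+// down to one bit equals 1 exactly when it is nonzero, so the test flips to
+// SETNE against 0.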
+func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETEQstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
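+// The SETEQstore rules mirror the SETEQ rules, but fold the result straight
+// into the byte store: bit-test patterns become SETAEstore of a BT, a
+// statically known flag state (FlagEQ, FlagLT_*, FlagGT_*) becomes a MOVBstore
+// of a constant 0 or 1, and ADDQconst/LEAQ address arithmetic is folded into
+// the store's offset and symbol while the combined offset still fits in 32 bits.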
+func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
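+// The plain signed conditions (SETG, SETGE, SETL, SETLE) admit only two
+// simplifications: InvertFlags marks a comparison whose operands were swapped,
+// so the condition is mirrored (SETG <-> SETL, SETGE <-> SETLE), and a known
+// flag state folds the whole instruction to a MOVLconst of 0 or 1.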
+func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
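+// SETNE is the dual of SETEQ: a set bit leaves CF == 1 after BT, so the same
+// single-bit patterns rewrite to SETB rather than SETAE, and the
+// CMP*const [1] (AND*const [1] _) shape flips back to SETEQ against 0.
+// The TESTBconst/TESTWconst [1] rules below go further: SETNE of a low-bit
+// test is just the low bit itself, e.g. (x&1 != 0) == x&1, hence ANDLconst [1].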
+func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETNE (TESTBconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTWconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETB (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETB (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETB (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETB (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETB (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETEQ (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETEQ (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
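+// As with SETEQstore, the SETNEstore rules below repeat the SETNE rewrites in
+// store form (SETBstore of a BT) and, like the other SET*store helpers, fold
+// ADDQconst/LEAQ address arithmetic into the store's offset and symbol.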
+func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETNEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
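+// Note on the SETNEstore rules above: each matched shift pattern isolates a single
+// bit of x (SHRQconst [63] x keeps only the sign bit; the SHL/SHR constant pairs
+// keep only bit 63, bit 31, or bit 0), so testing that value for "not equal to
+// zero" is the same as testing that one bit. The rewrite therefore uses a bit test
+// (BTQconst/BTLconst), whose result lands in the carry flag, and stores it with
+// SETBstore. Likewise, a value already masked to one bit is != 1 exactly when it
+// is == 0, which turns the leading CMPQconst [1] rule into a SETEQstore against 0.
+// The trailing Flag* rules fold a statically known flag state into a direct
+// MOVBstore of 0 or 1.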
+func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLL x (MOVQconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
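+// Note on the SHLL count rules above: x86 variable shifts of 32-bit operands use
+// only the low 5 bits of the count register, so a count of c+y with c&31 == 0, or
+// of y&c with c&31 == 31, shifts by exactly the same amount as y alone. That is
+// why the wrapping ADD*const/AND*const can be dropped from the count operand,
+// including when the count sits inside a NEGQ/NEGL, without changing the result.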
+func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst [1] (SHRLconst [1] x))
+ // result: (BTRLconst [0] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLLconst [d] (MOVLconst [c]))
+ // result: (MOVLconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ return false
+}
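+// Note on SHLLconst: (SHLLconst [1] (SHRLconst [1] x)) leaves every bit of x except
+// bit 0, so it is expressed as BTRLconst [0] (bit test and reset). A shift by 0 is
+// dropped, and a shift of a known MOVLconst is folded into a new constant at
+// compile time. SHLQconst, SHRLconst and SHRQconst below apply the analogous
+// bit-reset and shift-by-zero rules at their widths.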
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLQ x (MOVQconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (MOVLconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLQconst [1] (SHRQconst [1] x))
+ // result: (BTRQconst [0] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLQconst [d] (MOVQconst [c]))
+ // result: (MOVQconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (SHLQconst [d] (MOVLconst [c]))
+ // result: (MOVQconst [int64(c) << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVQconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVQconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
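+// Note on SHRB: the operand is only 8 bits wide, so once the (5-bit, hardware-masked)
+// count reaches 8 the result is statically zero and the shift collapses to
+// MOVLconst [0]; smaller constant counts become SHRBconst. SHRW below is the
+// analogous 16-bit case with a threshold of 16.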
+func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRL x (MOVQconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst [1] (SHLLconst [1] x))
+ // result: (BTRLconst [31] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRQ x (MOVQconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (MOVLconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRQconst [1] (SHLQconst [1] x))
+ // result: (BTRQconst [63] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVQconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVQconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
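+// Note on the SUBL load-folding rule above: when the right operand is a MOVLload
+// that can be merged into this value and clobbered (canMergeLoadClobber/clobber),
+// the subtraction reads that operand straight from memory as a single SUBLload.
+// SUBQ, SUBSS and SUBSD below fold their loads the same way.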
+func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
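+// Note on SUBLconst: subtracting a constant is canonicalized to adding its negation
+// (ADDLconst [-c]), with c == 0 reduced to the operand itself first. The 64-bit
+// variant below adds the guard c != -(1<<31): negating the minimum int32 overflows,
+// and while that wrap-around is harmless for a 32-bit add it would change the
+// result of a 64-bit one.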
+func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (SUBL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBQload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQborrow x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconstborrow x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBQconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQconst (MOVQconst [d]) [c])
+ // result: (MOVQconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBQconst (SUBQconst x [d]) [c])
+ // cond: is32Bit(int64(-c)-int64(d))
+ // result: (ADDQconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SUBQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(-c) - int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (SUBQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
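+// Note on the last SUBQload rule: when the memory operand was just written by a
+// MOVSDstore to the same [off] {sym} ptr, the load is bypassed and the stored XMM
+// value is reinterpreted in an integer register via MOVQf2i, so the store/load
+// round trip becomes a cross-register-file move feeding a plain SUBQ. SUBLload,
+// SUBSDload and SUBSSload carry the matching 32-bit and floating-point forms.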
+func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (SUBSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (SUBSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTB (MOVLconst [c]) x)
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
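+// Note on the TESTB load rule: (TESTB l l) where both arguments are the same
+// MOVBload with no other users (hence l.Uses == 2) is rewritten in the load's block
+// as a CMPBconstload against 0, so the flags come from a single memory-operand
+// compare and the standalone load disappears. TESTL, TESTQ and TESTW below repeat
+// the pattern at their widths.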
+func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTBconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTL (MOVLconst [c]) x)
+ // result: (TESTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTLconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (TESTQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTQconst [-1] x)
+ // cond: x.Op != OpAMD64MOVQconst
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVQconst) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
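+// Note on TESTQconst: a mask of -1 keeps every bit, so (TESTQconst [-1] x) produces
+// the same flags as (TESTQ x x). The x.Op != OpAMD64MOVQconst condition presumably
+// keeps constant operands out of this rule so that the constant-folding rules above,
+// which reduce to a Flag* value, handle them instead.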
+func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTW (MOVLconst [c]) x)
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTWconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDLlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDQlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGL [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGQ [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTCL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCLconst [int8(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCLconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETNE x))
+ // result: (SETEQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETEQ x))
+ // result: (SETNE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETL x))
+ // result: (SETGE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETGE x))
+ // result: (SETL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETLE x))
+ // result: (SETG x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETG x))
+ // result: (SETLE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETB x))
+ // result: (SETAE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETAE x))
+ // result: (SETB x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETBE x))
+ // result: (SETA x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETA x))
+ // result: (SETBE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (BTCLconst [d] x))
+ // result: (XORLconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (XORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
+ // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTCQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // result: (BTCQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCQconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (BTCQconst [d] x))
+ // cond: is32Bit(int64(c) ^ 1<<uint32(d))
+ // result: (XORQconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (XORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
+ // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAQ {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (ANDLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (ANDBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (CMPXCHGLlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (CMPXCHGQlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (XCHGL val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGL)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (XCHGQ val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGQ)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVLatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr32 ptr val mem)
+ // result: (ORLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr8 ptr val mem)
+ // result: (ORBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore32 ptr val mem)
+ // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore64 ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore8 ptr val mem)
+ // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen16 x)
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+ for {
+ x := v_0
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v2.AddArg(x)
+ v1.AddArg2(v2, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 <t> x)
+ // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
+ v1 := b.NewValue0(v.Pos, OpSelect0, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v3.AuxInt = int64ToAuxInt(-1)
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4.AddArg(v2)
+ v0.AddArg3(v1, v3, v4)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen8 x)
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (ROUNDSD [2] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(2)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 1
+ // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 2
+ // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 4
+ // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ // result: (CMOVLNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ // result: (CMOVWNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value) bool {
+ // match: (ConstNil )
+ // result: (MOVQconst [0])
+ for {
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (BTSLconst <typ.UInt32> [16] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
+ v0.AuxInt = int8ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+ for {
+ x := v_0
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
+ v1.AuxInt = int8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 <t> x)
+ // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64CMOVQEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64NonZero x)
+ // result: (Select0 (BSFQ x))
+ for {
+ x := v_0
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [a] x y)
+ // result: (Select0 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [a] x y)
+ // result: (Select0 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 [a] x y)
+ // result: (Select0 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
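+// rewriteValueAMD64_OpFMA lowers the generic FMA (x*y + z) to VFMADD231SD,
+// which computes arg0 + arg1*arg2; the addend z is therefore passed as the
+// first operand, followed by x and y.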
+func rewriteValueAMD64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (VFMADD231SD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpAMD64VFMADD231SD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
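+// rewriteValueAMD64_OpFloor lowers Floor to ROUNDSD with immediate 1, the
+// SSE4.1 round-toward-negative-infinity mode (RoundToEven further below uses
+// mode 0, round-to-nearest-even).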
+func rewriteValueAMD64_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (ROUNDSD [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
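+// rewriteValueAMD64_OpHasCPUFeature materializes a CPU-feature check by
+// loading the runtime's feature flag (LoweredHasCPUFeature), comparing it
+// against zero, and producing the boolean result with SETNE.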
+func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (HasCPUFeature {s})
+ // result: (SETNE (CMPQconst [0] (LoweredHasCPUFeature {s})))
+ for {
+ s := auxToSym(v.Aux)
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
+ v1.Aux = symToAux(s)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTQ p p))
+ for {
+ p := v_0
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
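+// rewriteValueAMD64_OpLeq32F lowers the floating-point x <= y by comparing the
+// operands in reverse order (UCOMISS y x) and testing the greater-or-equal
+// condition with SETGEF, so that comparisons involving NaN come out false as
+// Go requires. The other float comparisons below use the same operand swap.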
+func rewriteValueAMD64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
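+// rewriteValueAMD64_OpLoad selects the machine load from the type being
+// loaded: MOVQload for 64-bit integers and pointers, MOVLload/MOVWload/MOVBload
+// for narrower integers and booleans, and MOVSSload/MOVSDload for 32- and
+// 64-bit floats.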
+func rewriteValueAMD64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LEAQ {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
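+// The Lsh* rules below implement Go's shift semantics. When the shift amount is
+// not known to be in range (!shiftIsBounded), the shifted value is ANDed with an
+// SBB*carrymask, which is -1 when the amount is below the shift instruction's
+// operand width (32 for SHLL, 64 for SHLQ, tested by the CMP*const) and 0
+// otherwise, so oversized shifts produce 0 as the language requires. When the
+// amount is provably bounded, a plain SHLL/SHLQ is emitted.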
+func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [a] x y)
+ // result: (Select1 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [a] x y)
+ // result: (Select1 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 [a] x y)
+ // result: (Select1 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
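+// rewriteValueAMD64_OpMove lowers memory copies by size: small constant sizes
+// become straight load/store pairs (using a 16-byte MOVO when useSSE is set),
+// 32/48/64-byte copies and odd tails are split recursively into smaller Moves,
+// copies over 64 bytes up to 16*64 whose length is a multiple of 16 go through
+// DUFFCOPY, and larger copies (or when the Duff device is disabled) fall back
+// to REPMOVSQ.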
+func rewriteValueAMD64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: config.useSSE
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: !config.useSSE
+ // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] dst src mem)
+ // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [48] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [64] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(32)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [9] dst src mem)
+ // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [10] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s == 11 || s >= 13 && s <= 15
+ // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s == 11 || s >= 13 && s <= 15) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 <= 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg2(src, mem)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
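+// rewriteValueAMD64_OpNeg32F negates a float by XORing it with -0.0, a constant
+// whose only set bit is the sign bit; Neg64F below does the same at 64 bits.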
+func rewriteValueAMD64_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
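+// rewriteValueAMD64_OpOffPtr adds a constant offset to a pointer with ADDQconst
+// when the offset fits in 32 bits, and otherwise materializes the offset into a
+// register with MOVQconst first.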
+func rewriteValueAMD64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDQconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
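+// rewriteValueAMD64_OpPanicBounds dispatches on boundsABI(kind) to one of three
+// LoweredPanicBounds variants, which differ only in which registers the runtime
+// expects the out-of-range index and length/capacity to arrive in.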
+func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (ROUNDSD [0] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
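+// The unsigned right-shift rules below mirror the Lsh* rules above: the SHR
+// result is masked with an SBB*carrymask so that shift amounts at or beyond the
+// element width yield 0, unless shiftIsBounded already guarantees the amount is
+// in range.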
+func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
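+// The signed right-shift rules below cannot simply mask the result, because a
+// negative value shifted by an oversized amount must yield -1, not 0. Instead
+// the shift amount is adjusted: ORing y with the complement of the carry mask
+// turns any out-of-range amount into all ones, which the hardware clamps to the
+// maximum effective count, so SAR shifts in sign bits and produces 0 or -1 as
+// Go requires. Bounded shifts use a plain SAR.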
+func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
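+// Note on the unsigned-shift rules above: an unsigned Go shift by a count >=
+// the operand width must produce zero, so when the count is not provably
+// bounded the SHR result is ANDed with SBBLcarrymask(CMP y [32]), which is
+// all ones for y < 32 and zero otherwise. Sketch of the semantics preserved:
+//
+//   var x uint32 = 0xFFFFFFFF
+//   _ = x >> 31 // 1
+//   _ = x >> 40 // 0: the carry mask zeroes the result for counts >= 32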
+func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select0 <typ.UInt64> (MULQU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDL val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADDQ val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Add64carry x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (NEGLflags (MOVQconst [0])))
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64SBBQcarrymask {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ return false
+}
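+// Note on the Select0/Select1 rules above: Add64carry and Sub64borrow are the
+// generic ops that math/bits.Add64 and math/bits.Sub64 compile to on amd64;
+// they are lowered here to ADCQ/SBBQ, with the incoming carry materialized via
+// NEGLflags and the outgoing carry recovered via SBBQcarrymask. For example:
+//
+//   sum, carry := bits.Add64(1<<63, 1<<63, 0) // sum == 0, carry == 1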
+func rewriteValueAMD64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARQconst (NEGQ <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
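+// Note: Slicemask produces an all-ones mask when its argument is positive and
+// zero when it is zero; the lowering above computes that as an arithmetic
+// shift of the negation, e.g. for a non-negative int64 length n:
+//
+//   mask := -n >> 63 // -1 when n > 0, 0 when n == 0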
+func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreIndex <t> x y)
+ // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQCC)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreSliceIndex <t> x y)
+ // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQHI)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
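+// Note: SpectreIndex and SpectreSliceIndex appear only when compiling with the
+// -spectre=index flag; roughly, they yield the index unchanged when it is in
+// bounds and 0 otherwise, implemented above as a CMOV guarded by the same
+// CMPQ the bounds check uses, so misspeculated loads see a clamped index.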
+func rewriteValueAMD64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (MOVQstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
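+// Note: the generic Store op is dispatched purely on the stored type's size
+// and floatness. For example, storing a float32 (size 4, 32-bit float) uses
+// MOVSSstore, while storing a pointer or int64 (size 8, non-float) uses
+// MOVQstore.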
+func rewriteValueAMD64_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (ROUNDSD [3] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(3)
+ v.AddArg(x)
+ return true
+ }
+}
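+// Note: ROUNDSD with immediate 3 selects round-toward-zero in the SSE4.1
+// rounding-control encoding, which matches Go's math.Trunc (e.g.
+// math.Trunc(-1.7) == -1).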
+func rewriteValueAMD64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,3)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8 && !config.useSSE
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff32(0,16)] destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff32(0,24)] destptr (MOVQstoreconst [makeValAndOff32(0,16)] destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 24))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 8 && s < 16 && config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 8 && s < 16 && config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, int32(s-8)))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v2.AuxInt = int128ToAuxInt(0)
+ v1.AddArg3(destptr, v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstore destptr (MOVOconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v0.AuxInt = int128ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v1.AuxInt = int128ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v2.AddArg3(destptr, v1, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Zero [48] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v1.AuxInt = int128ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v3.AuxInt = int64ToAuxInt(16)
+ v3.AddArg(destptr)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v4.AddArg3(destptr, v1, mem)
+ v2.AddArg3(v3, v1, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Zero [64] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v1.AuxInt = int128ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v3.AuxInt = int64ToAuxInt(32)
+ v3.AddArg(destptr)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v5.AuxInt = int64ToAuxInt(16)
+ v5.AddArg(destptr)
+ v6 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v6.AddArg3(destptr, v1, mem)
+ v4.AddArg3(v5, v1, v6)
+ v2.AddArg3(v3, v1, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(s)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
+ v0.AuxInt = int128ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
+ // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPSTOSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
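+// Note on the Zero rules above: small and odd sizes are handled with
+// (possibly overlapping) store-constant pairs; sizes up to 64 bytes use either
+// MOVQstoreconst chains or 16-byte MOVOstore chains depending on
+// config.useSSE; sizes in (64, 1024] that are multiples of 16 go through
+// DUFFZERO; and everything larger (or with the Duff device disabled) falls
+// back to REPSTOSQ. For example, zeroing a [1024]byte local with SSE enabled
+// hits the DUFFZERO rule.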
+func rewriteBlockAMD64(b *Block) bool {
+ switch b.Kind {
+ case BlockAMD64EQ:
+ // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (UGE (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (UGE (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (UGE (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (UGE (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (UGE (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
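+ // Note on the EQ rules above: a test of a single bit, such as
+ // x&(1<<uint(n)) == 0 or x&(1<<31) == 0, is branched on directly via
+ // BTQ/BTL (or their const forms), with EQ becoming UGE because BT leaves the
+ // tested bit in the carry flag and "carry clear" means the bit was zero.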
+ case BlockAMD64GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(BlockAMD64NE, v0)
+ return true
+ }
+ case BlockAMD64LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (ULT (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (ULT (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (ULT (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (ULT (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (ULT (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64UGE:
+ // match: (UGE (TESTQ x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTL x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTW x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTB x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64ULT:
+ // match: (ULT (TESTQ x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTL x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTW x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTB x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
new file mode 100644
index 0000000..65bfec0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -0,0 +1,853 @@
+// Code generated from gen/AMD64splitload.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValueAMD64splitload(v *Value) bool {
+ switch v.Op {
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBload(v)
+ case OpAMD64CMPBloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v)
+ case OpAMD64CMPLconstloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLload(v)
+ case OpAMD64CMPLloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v)
+ case OpAMD64CMPLloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v)
+ case OpAMD64CMPQconstloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQload(v)
+ case OpAMD64CMPQloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v)
+ case OpAMD64CMPQloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v)
+ case OpAMD64CMPWconstloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWload(v)
+ case OpAMD64CMPWloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v)
+ case OpAMD64CMPWloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v)
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBload {sym} [vo.Off32()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBload {sym} [vo.Off32()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPB (MOVBloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLload {sym} [vo.Off32()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx4 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQload {sym} [vo.Off32()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQload {sym} [vo.Off32()] ptr mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off32()] ptr idx mem) [vo.Val32()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQload {sym} [off] ptr x mem)
+ // result: (CMPQ (MOVQload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx8 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWload {sym} [vo.Off32()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWload {sym} [vo.Off32()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off32())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off32()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off32())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx2 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlockAMD64splitload(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
new file mode 100644
index 0000000..1f25005
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -0,0 +1,22017 @@
+// Code generated from gen/ARM.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
+
+func rewriteValueARM(v *Value) bool {
+ switch v.Op {
+ case OpARMADC:
+ return rewriteValueARM_OpARMADC(v)
+ case OpARMADCconst:
+ return rewriteValueARM_OpARMADCconst(v)
+ case OpARMADCshiftLL:
+ return rewriteValueARM_OpARMADCshiftLL(v)
+ case OpARMADCshiftLLreg:
+ return rewriteValueARM_OpARMADCshiftLLreg(v)
+ case OpARMADCshiftRA:
+ return rewriteValueARM_OpARMADCshiftRA(v)
+ case OpARMADCshiftRAreg:
+ return rewriteValueARM_OpARMADCshiftRAreg(v)
+ case OpARMADCshiftRL:
+ return rewriteValueARM_OpARMADCshiftRL(v)
+ case OpARMADCshiftRLreg:
+ return rewriteValueARM_OpARMADCshiftRLreg(v)
+ case OpARMADD:
+ return rewriteValueARM_OpARMADD(v)
+ case OpARMADDD:
+ return rewriteValueARM_OpARMADDD(v)
+ case OpARMADDF:
+ return rewriteValueARM_OpARMADDF(v)
+ case OpARMADDS:
+ return rewriteValueARM_OpARMADDS(v)
+ case OpARMADDSshiftLL:
+ return rewriteValueARM_OpARMADDSshiftLL(v)
+ case OpARMADDSshiftLLreg:
+ return rewriteValueARM_OpARMADDSshiftLLreg(v)
+ case OpARMADDSshiftRA:
+ return rewriteValueARM_OpARMADDSshiftRA(v)
+ case OpARMADDSshiftRAreg:
+ return rewriteValueARM_OpARMADDSshiftRAreg(v)
+ case OpARMADDSshiftRL:
+ return rewriteValueARM_OpARMADDSshiftRL(v)
+ case OpARMADDSshiftRLreg:
+ return rewriteValueARM_OpARMADDSshiftRLreg(v)
+ case OpARMADDconst:
+ return rewriteValueARM_OpARMADDconst(v)
+ case OpARMADDshiftLL:
+ return rewriteValueARM_OpARMADDshiftLL(v)
+ case OpARMADDshiftLLreg:
+ return rewriteValueARM_OpARMADDshiftLLreg(v)
+ case OpARMADDshiftRA:
+ return rewriteValueARM_OpARMADDshiftRA(v)
+ case OpARMADDshiftRAreg:
+ return rewriteValueARM_OpARMADDshiftRAreg(v)
+ case OpARMADDshiftRL:
+ return rewriteValueARM_OpARMADDshiftRL(v)
+ case OpARMADDshiftRLreg:
+ return rewriteValueARM_OpARMADDshiftRLreg(v)
+ case OpARMAND:
+ return rewriteValueARM_OpARMAND(v)
+ case OpARMANDconst:
+ return rewriteValueARM_OpARMANDconst(v)
+ case OpARMANDshiftLL:
+ return rewriteValueARM_OpARMANDshiftLL(v)
+ case OpARMANDshiftLLreg:
+ return rewriteValueARM_OpARMANDshiftLLreg(v)
+ case OpARMANDshiftRA:
+ return rewriteValueARM_OpARMANDshiftRA(v)
+ case OpARMANDshiftRAreg:
+ return rewriteValueARM_OpARMANDshiftRAreg(v)
+ case OpARMANDshiftRL:
+ return rewriteValueARM_OpARMANDshiftRL(v)
+ case OpARMANDshiftRLreg:
+ return rewriteValueARM_OpARMANDshiftRLreg(v)
+ case OpARMBFX:
+ return rewriteValueARM_OpARMBFX(v)
+ case OpARMBFXU:
+ return rewriteValueARM_OpARMBFXU(v)
+ case OpARMBIC:
+ return rewriteValueARM_OpARMBIC(v)
+ case OpARMBICconst:
+ return rewriteValueARM_OpARMBICconst(v)
+ case OpARMBICshiftLL:
+ return rewriteValueARM_OpARMBICshiftLL(v)
+ case OpARMBICshiftLLreg:
+ return rewriteValueARM_OpARMBICshiftLLreg(v)
+ case OpARMBICshiftRA:
+ return rewriteValueARM_OpARMBICshiftRA(v)
+ case OpARMBICshiftRAreg:
+ return rewriteValueARM_OpARMBICshiftRAreg(v)
+ case OpARMBICshiftRL:
+ return rewriteValueARM_OpARMBICshiftRL(v)
+ case OpARMBICshiftRLreg:
+ return rewriteValueARM_OpARMBICshiftRLreg(v)
+ case OpARMCMN:
+ return rewriteValueARM_OpARMCMN(v)
+ case OpARMCMNconst:
+ return rewriteValueARM_OpARMCMNconst(v)
+ case OpARMCMNshiftLL:
+ return rewriteValueARM_OpARMCMNshiftLL(v)
+ case OpARMCMNshiftLLreg:
+ return rewriteValueARM_OpARMCMNshiftLLreg(v)
+ case OpARMCMNshiftRA:
+ return rewriteValueARM_OpARMCMNshiftRA(v)
+ case OpARMCMNshiftRAreg:
+ return rewriteValueARM_OpARMCMNshiftRAreg(v)
+ case OpARMCMNshiftRL:
+ return rewriteValueARM_OpARMCMNshiftRL(v)
+ case OpARMCMNshiftRLreg:
+ return rewriteValueARM_OpARMCMNshiftRLreg(v)
+ case OpARMCMOVWHSconst:
+ return rewriteValueARM_OpARMCMOVWHSconst(v)
+ case OpARMCMOVWLSconst:
+ return rewriteValueARM_OpARMCMOVWLSconst(v)
+ case OpARMCMP:
+ return rewriteValueARM_OpARMCMP(v)
+ case OpARMCMPD:
+ return rewriteValueARM_OpARMCMPD(v)
+ case OpARMCMPF:
+ return rewriteValueARM_OpARMCMPF(v)
+ case OpARMCMPconst:
+ return rewriteValueARM_OpARMCMPconst(v)
+ case OpARMCMPshiftLL:
+ return rewriteValueARM_OpARMCMPshiftLL(v)
+ case OpARMCMPshiftLLreg:
+ return rewriteValueARM_OpARMCMPshiftLLreg(v)
+ case OpARMCMPshiftRA:
+ return rewriteValueARM_OpARMCMPshiftRA(v)
+ case OpARMCMPshiftRAreg:
+ return rewriteValueARM_OpARMCMPshiftRAreg(v)
+ case OpARMCMPshiftRL:
+ return rewriteValueARM_OpARMCMPshiftRL(v)
+ case OpARMCMPshiftRLreg:
+ return rewriteValueARM_OpARMCMPshiftRLreg(v)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v)
+ case OpARMMOVBUloadidx:
+ return rewriteValueARM_OpARMMOVBUloadidx(v)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v)
+ case OpARMMOVBloadidx:
+ return rewriteValueARM_OpARMMOVBloadidx(v)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v)
+ case OpARMMOVBstoreidx:
+ return rewriteValueARM_OpARMMOVBstoreidx(v)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v)
+ case OpARMMOVHUloadidx:
+ return rewriteValueARM_OpARMMOVHUloadidx(v)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v)
+ case OpARMMOVHloadidx:
+ return rewriteValueARM_OpARMMOVHloadidx(v)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v)
+ case OpARMMOVHstoreidx:
+ return rewriteValueARM_OpARMMOVHstoreidx(v)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v)
+ case OpARMMULD:
+ return rewriteValueARM_OpARMMULD(v)
+ case OpARMMULF:
+ return rewriteValueARM_OpARMMULF(v)
+ case OpARMMULS:
+ return rewriteValueARM_OpARMMULS(v)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v)
+ case OpARMNEGD:
+ return rewriteValueARM_OpARMNEGD(v)
+ case OpARMNEGF:
+ return rewriteValueARM_OpARMNEGF(v)
+ case OpARMNMULD:
+ return rewriteValueARM_OpARMNMULD(v)
+ case OpARMNMULF:
+ return rewriteValueARM_OpARMNMULF(v)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v)
+ case OpARMSUBD:
+ return rewriteValueARM_OpARMSUBD(v)
+ case OpARMSUBF:
+ return rewriteValueARM_OpARMSUBF(v)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v)
+ case OpARMTEQ:
+ return rewriteValueARM_OpARMTEQ(v)
+ case OpARMTEQconst:
+ return rewriteValueARM_OpARMTEQconst(v)
+ case OpARMTEQshiftLL:
+ return rewriteValueARM_OpARMTEQshiftLL(v)
+ case OpARMTEQshiftLLreg:
+ return rewriteValueARM_OpARMTEQshiftLLreg(v)
+ case OpARMTEQshiftRA:
+ return rewriteValueARM_OpARMTEQshiftRA(v)
+ case OpARMTEQshiftRAreg:
+ return rewriteValueARM_OpARMTEQshiftRAreg(v)
+ case OpARMTEQshiftRL:
+ return rewriteValueARM_OpARMTEQshiftRL(v)
+ case OpARMTEQshiftRLreg:
+ return rewriteValueARM_OpARMTEQshiftRLreg(v)
+ case OpARMTST:
+ return rewriteValueARM_OpARMTST(v)
+ case OpARMTSTconst:
+ return rewriteValueARM_OpARMTSTconst(v)
+ case OpARMTSTshiftLL:
+ return rewriteValueARM_OpARMTSTshiftLL(v)
+ case OpARMTSTshiftLLreg:
+ return rewriteValueARM_OpARMTSTshiftLLreg(v)
+ case OpARMTSTshiftRA:
+ return rewriteValueARM_OpARMTSTshiftRA(v)
+ case OpARMTSTshiftRAreg:
+ return rewriteValueARM_OpARMTSTshiftRAreg(v)
+ case OpARMTSTshiftRL:
+ return rewriteValueARM_OpARMTSTshiftRL(v)
+ case OpARMTSTshiftRLreg:
+ return rewriteValueARM_OpARMTSTshiftRLreg(v)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v)
+ case OpARMXORshiftRR:
+ return rewriteValueARM_OpARMXORshiftRR(v)
+ case OpAbs:
+ v.Op = OpARMABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARMADDF
+ return true
+ case OpAdd32carry:
+ v.Op = OpARMADDS
+ return true
+ case OpAdd32withcarry:
+ v.Op = OpARMADC
+ return true
+ case OpAdd64F:
+ v.Op = OpARMADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARMADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARMADD
+ return true
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARMAND
+ return true
+ case OpAnd32:
+ v.Op = OpARMAND
+ return true
+ case OpAnd8:
+ v.Op = OpARMAND
+ return true
+ case OpAndB:
+ v.Op = OpARMAND
+ return true
+ case OpAvg32u:
+ return rewriteValueARM_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueARM_OpBitLen32(v)
+ case OpBswap32:
+ return rewriteValueARM_OpBswap32(v)
+ case OpClosureCall:
+ v.Op = OpARMCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARMMVN
+ return true
+ case OpCom32:
+ v.Op = OpARMMVN
+ return true
+ case OpCom8:
+ v.Op = OpARMMVN
+ return true
+ case OpConst16:
+ return rewriteValueARM_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM_OpConst32F(v)
+ case OpConst64F:
+ return rewriteValueARM_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz8:
+ return rewriteValueARM_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARMMOVFW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARMMOVFWU
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARMMOVFD
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARMMOVWUF
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARMMOVWUD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARMMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARMMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARMMOVDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARMMOVDF
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARMMOVDWU
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARMDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueARM_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpARMDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueARM_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueARM_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM_OpFMA(v)
+ case OpGetCallerPC:
+ v.Op = OpARMLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARMLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARMLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpARMHMUL
+ return true
+ case OpHmul32u:
+ v.Op = OpARMHMULU
+ return true
+ case OpInterCall:
+ v.Op = OpARMCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueARM_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueARM_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueARM_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueARM_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueARM_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueARM_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueARM_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM_OpMove(v)
+ case OpMul16:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32F:
+ v.Op = OpARMMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpARMMULLU
+ return true
+ case OpMul64F:
+ v.Op = OpARMMULD
+ return true
+ case OpMul8:
+ v.Op = OpARMMUL
+ return true
+ case OpNeg16:
+ return rewriteValueARM_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueARM_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpARMNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpARMNEGD
+ return true
+ case OpNeg8:
+ return rewriteValueARM_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueARM_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueARM_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARMXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARMLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARMOR
+ return true
+ case OpOr32:
+ v.Op = OpARMOR
+ return true
+ case OpOr8:
+ v.Op = OpARMOR
+ return true
+ case OpOrB:
+ v.Op = OpARMOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueARM_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueARM_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM_OpRotateLeft32(v)
+ case OpRotateLeft8:
+ return rewriteValueARM_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpARMMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueARM_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueARM_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARMSQRTD
+ return true
+ case OpStaticCall:
+ v.Op = OpARMCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM_OpStore(v)
+ case OpSub16:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32F:
+ v.Op = OpARMSUBF
+ return true
+ case OpSub32carry:
+ v.Op = OpARMSUBS
+ return true
+ case OpSub32withcarry:
+ v.Op = OpARMSBC
+ return true
+ case OpSub64F:
+ v.Op = OpARMSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARMSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARMSUB
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARMLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARMXOR
+ return true
+ case OpXor32:
+ v.Op = OpARMXOR
+ return true
+ case OpXor8:
+ v.Op = OpARMXOR
+ return true
+ case OpZero:
+ return rewriteValueARM_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARMMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueARM_OpZeromask(v)
+ }
+ return false
+}
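+// Each rewriteValueARM_Op* function below applies the lowering rules listed in
+// its "match:"/"result:" comments; the inner loops that swap v_0 and v_1 retry
+// a rule with the commutative arguments in the other order.
+// ADC folds a constant addend into ADCconst and fuses a shifted addend
+// (SLL/SRL/SRA by constant or by register) into the matching ADCshift* op,
+// passing the carry flags argument through unchanged.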
+func rewriteValueARM_OpARMADC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADC (MOVWconst [c]) x flags)
+ // result: (ADCconst [c] x flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLLconst [c] y) flags)
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRLconst [c] y) flags)
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRAconst [c] y) flags)
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLL y z) flags)
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRL y z) flags)
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRA y z) flags)
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ return false
+}
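+// ADCconst absorbs an ADDconst or SUBconst applied to its operand by
+// adjusting its own immediate (c+d or c-d).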
+func rewriteValueARM_OpARMADCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCconst [c] (ADDconst [d] x) flags)
+ // result: (ADCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (ADCconst [c] (SUBconst [d] x) flags)
+ // result: (ADCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
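+// ADD lowers a constant operand to ADDconst, fuses shifted operands into the
+// ADDshift* forms, cancels an (RSBconst [0] y) operand into a plain SUB,
+// merges two RSBconst operands into one, and turns ADD of a MUL into MULA.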
+func rewriteValueARM_OpARMADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADD x (MOVWconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLLconst [c] y))
+ // result: (ADDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // result: (ADDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // result: (ADDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLL y z))
+ // result: (ADDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRL y z))
+ // result: (ADDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRA y z))
+ // result: (ADDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (RSBconst [0] y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> (RSBconst [c] x) (RSBconst [d] y))
+ // result: (RSBconst [c+d] (ADD <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMRSBconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARMRSBconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v0 := b.NewValue0(v.Pos, OpARMADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MUL x y) a)
+ // result: (MULA x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMUL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARMMULA)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDD a (MULD x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULAD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDD a (NMULD x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULSD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDF a (MULF x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULAF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDF a (NMULF x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULSF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDS x (MOVWconst [c]))
+ // result: (ADDSconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLLconst [c] y))
+ // result: (ADDSshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRLconst [c] y))
+ // result: (ADDSshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRAconst [c] y))
+ // result: (ADDSshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLL y z))
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRL y z))
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRA y z))
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
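+// ADDconst folds its offset into a MOVWaddr, drops an add of zero, prefers
+// SUBconst [-c] when -c is encodable as an ARM rotated immediate while c is
+// not (or, on GOARM 7, when c does not fit in 16 bits but -c does), and
+// folds chains of constant add/sub/rsb operations.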
+func rewriteValueARM_OpARMADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (RSBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: objabi.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [ c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
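+// AND lowers a constant operand to ANDconst, fuses shifted operands into the
+// ANDshift* forms, simplifies x&x to x, and rewrites AND with an MVN
+// (possibly shifted) operand into the corresponding BIC form.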
+func rewriteValueARM_OpARMAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLLconst [c] y))
+ // result: (ANDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRLconst [c] y))
+ // result: (ANDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRAconst [c] y))
+ // result: (ANDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLL y z))
+ // result: (ANDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRL y z))
+ // result: (ANDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRA y z))
+ // result: (ANDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMBIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftLL y [c]))
+ // result: (BICshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftLL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRL y [c]))
+ // result: (BICshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRA y [c]))
+ // result: (BICshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRA {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
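+// ANDconst: masking with 0 yields 0, masking with -1 is the identity, and an
+// immediate whose complement is easier to encode (a rotated immediate, or a
+// 16-bit value on GOARM 7) becomes BICconst; constant operands are folded.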
+func rewriteValueARM_OpARMANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
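+// BFX/BFXU of a constant are folded to MOVWconst by shifting the selected
+// field to the top of the word and back down (arithmetic shift for the signed
+// BFX, logical shift for BFXU); the aux int carries the field width in its
+// low byte and the starting bit position above it.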
+func rewriteValueARM_OpARMBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFX [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBFXU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFXU [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVWconst [c]))
+ // result: (BICconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x (SLLconst [c] y))
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SLL y z))
+ // result: (BICshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRL y z))
+ // result: (BICshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRA y z))
+ // result: (BICshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BICconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (BICconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d&^c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d &^ c)
+ return true
+ }
+ // match: (BICconst [c] (BICconst [d] x))
+ // result: (BICconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMBICconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVWconst [c]) [d])
+ // result: (BICconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVWconst [c]) [d])
+ // result: (BICconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVWconst [c]) [d])
+ // result: (BICconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVWconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLLconst [c] y))
+ // result: (CMNshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRLconst [c] y))
+ // result: (CMNshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRAconst [c] y))
+ // result: (CMNshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLL y z))
+ // result: (CMNshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRL y z))
+ // result: (CMNshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRA y z))
+ // result: (CMNshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [addFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRAreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.uge()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ult()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHSconst x (InvertFlags flags) [c])
+ // result: (CMOVWLSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.ule()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ugt()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLSconst x (InvertFlags flags) [c])
+ // result: (CMOVWHSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVWconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVWconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLLconst [c] y))
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLL y z))
+ // result: (CMPshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SLL y z) x)
+ // result: (InvertFlags (CMPshiftLLreg x y z))
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRL y z))
+ // result: (CMPshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRL y z) x)
+ // result: (InvertFlags (CMPshiftRLreg x y z))
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRA y z))
+ // result: (CMPshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRA y z) x)
+ // result: (InvertFlags (CMPshiftRAreg x y z))
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPD x (MOVDconst [0]))
+ // result: (CMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPF x (MOVFconst [0]))
+ // result: (CMPF0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVFconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPF0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [subFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.eq())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.uge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.gt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ugt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.le())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ule())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.lt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ult())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVDload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVFload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
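+// signExtendMaskSketch is a hand-written sketch (hypothetical helper, not
+// generated from ARM.rules) of why the MOVHreg (ANDconst [c] x) rule above
+// requires c&0x8000 == 0: if bit 15 of the masked value can never be set,
+// sign-extending the low 16 bits is a no-op, so the whole operation reduces
+// to masking with c&0x7fff, which is what the rule emits.
+func signExtendMaskSketch(x int32) {
+	masked := x & 0x7abc     // bit 15 of 0x7abc is clear, so masked fits in a non-negative int16
+	_ = int32(int16(masked)) // sign extension leaves the value unchanged
+	_ = masked & 0x7fff      // same value again, matching ANDconst [c&0x7fff] x
+}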
+func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVWload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftLL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRA {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [c>>uint64(d)] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
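+// shiftFoldSketch is a hand-written sketch (hypothetical helper, not generated
+// from ARM.rules) of why the constant folds above differ between the shift
+// variants: for a shifted index constant c, the RA (arithmetic) fold keeps the
+// sign with c>>uint64(d), while the RL (logical) fold zero-fills by going
+// through uint32 first, and the LL fold shifts left through uint32 the same way.
+func shiftFoldSketch() {
+	c := int32(-8)            // 0xFFFFFFF8
+	_ = c >> 2                // arithmetic shift: -2
+	_ = int32(uint32(c) >> 2) // logical shift: 0x3FFFFFFE
+	_ = int32(uint32(c) << 2) // left shift: -32, high bits simply discarded
+}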
+func rewriteValueARM_OpARMMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARMMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [c>>uint64(d)] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL x (MOVWconst [c]))
+ // cond: int32(c) == -1
+ // result: (RSBconst [0] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (SLLconst [int32(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADDshiftLL x x [int32(log32(c-1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSBshiftLL x x [int32(log32(c+1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
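+// mulStrengthReductionSketch is a hand-written sketch (hypothetical helper,
+// not generated from ARM.rules) of the int32 identities behind the MUL rules
+// above: x*2^n == x<<n, x*(2^n+1) == x + x<<n, x*(2^n-1) == x<<n - x, and a
+// constant divisible by 3, 5, 7 or 9 factors into one of those forms followed
+// by a final left shift.
+func mulStrengthReductionSketch(x int32) {
+	_ = x << 3          // x*8:  isPowerOfTwo32(8), so SLLconst [3] x
+	_ = x + x<<3        // x*9:  9-1 is a power of two, so ADDshiftLL x x [3]
+	_ = x<<3 - x        // x*7:  7+1 is a power of two, so RSBshiftLL x x [3]
+	_ = (x + x<<1) << 2 // x*12: 12%3 == 0 and 12/3 == 4, so SLLconst [2] (ADDshiftLL x x [1])
+}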
+func rewriteValueARM_OpARMMULA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [1]) a)
+ // result: (ADD x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [1]) x a)
+ // result: (ADD x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (ADDconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULD (NEGD x) y)
+ // cond: objabi.GOARM >= 6
+ // result: (NMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULF (NEGF x) y)
+ // cond: objabi.GOARM >= 6
+ // result: (NMULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(objabi.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [1]) a)
+ // result: (RSB x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS (MOVWconst [1]) x a)
+ // result: (RSB x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (SUBconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (MOVWconst [c]))
+ // result: (MOVWconst [^c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN (SLLconst [c] x))
+ // result: (MVNshiftLL x [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRLconst [c] x))
+ // result: (MVNshiftRL x [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRAconst [c] x))
+ // result: (MVNshiftRA x [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SLL x y))
+ // result: (MVNshiftLLreg x y)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRL x y))
+ // result: (MVNshiftRLreg x y)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRA x y))
+ // result: (MVNshiftRAreg x y)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRAreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^(c<<uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftLL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
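+ // The 0 <= c && c < 32 guard restricts this fold (and the analogous
+ // *shiftreg folds elsewhere in this file) to constants that are valid
+ // immediate shift amounts for the constant-shift ops; other counts keep
+ // the register-shift form.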
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVWconst [c]) [d])
+ // result: (MOVWconst [int32(c)>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRA x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGD (MULD x y))
+ // cond: objabi.GOARM >= 6
+ // result: (NMULD x y)
+ for {
+ if v_0.Op != OpARMMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGF (MULF x y))
+ // cond: objabi.GOARM >= 6
+ // result: (NMULF x y)
+ for {
+ if v_0.Op != OpARMMULF {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULD (NEGD x) y)
+ // result: (MULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
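+ // The two-iteration _i0 loop above retries the pattern with v_0 and v_1
+ // swapped, since NMULD's operands are commutative; the same idiom appears
+ // in the other commutative-op matchers below (NMULF, OR, and friends).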
+ return false
+}
+func rewriteValueARM_OpARMNMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULF (NEGF x) y)
+ // result: (MULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMNotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ne())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLLconst [c] y))
+ // result: (ORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRLconst [c] y))
+ // result: (ORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRAconst [c] y))
+ // result: (ORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLL y z))
+ // result: (ORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRL y z))
+ // result: (ORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRA y z))
+ // result: (ORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // result: (ORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
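+ // The rule above recognizes a rotate: (x>>(32-c)) | (x<<c) rotates x left
+ // by c, which is emitted as a rotate right by 32-c (SRRconst [32-c] x).
+ // The mirror pattern in ORshiftRL below yields (SRRconst [c] x) for
+ // (x<<(32-c)) | (x>>c).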
+ // match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: objabi.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
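+ // The two REV16 rules above match a byte swap of the low 16 bits of x:
+ // OR-ing the byte at bits 8-15 of x (extracted with BFXU, or with the
+ // SLLconst/SRLconst pair on pre-ARMv7) into x<<8 swaps the two low bytes,
+ // which is what REV16 produces in the low halfword.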
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // result: (ORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSB (MOVWconst [c]) x)
+ // result: (SUBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // result: (RSBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (SLLconst [c] y))
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // result: (SUBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // result: (SUBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // result: (SUBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // result: (SUBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // result: (SUBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // result: (SUBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (RSB (MUL x y) a)
+ // cond: objabi.GOARM == 7
+ // result: (MULS x y a)
+ for {
+ if v_0.Op != OpARMMUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(objabi.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c-d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // result: (RSBconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // result: (RSCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // result: (RSCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBC (MOVWconst [c]) x flags)
+ // result: (RSCconst [c] x flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
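+ // SBC computes arg0 - arg1 - borrow; when the constant or shifted operand
+ // appears on the left, the rules here flip to the reverse form
+ // (RSCconst / RSCshift*) so that operand can still be folded.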
+ // match: (SBC x (MOVWconst [c]) flags)
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBC x (SLLconst [c] y) flags)
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // result: (SBCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // result: (SBCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAcond(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAcond x _ (FlagConstant [fc]))
+ // cond: fc.uge()
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagConstant [fc]))
+ // cond: fc.ult()
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst (SLLconst x [c]) [d])
+ // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFX [(d-c)|(32-d)<<8] x)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFX)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
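+ // A left shift by c followed by a larger-or-equal arithmetic right shift
+ // by d is a signed bitfield extract, so on ARMv7 it becomes BFX with the
+ // aux int packed as lsb|width<<8 = (d-c)|(32-d)<<8; the corresponding
+ // SRLconst rule in rewriteValueARM_OpARMSRLconst below does the same for
+ // the unsigned case with BFXU.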
+ return false
+}
+func rewriteValueARM_OpARMSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint64(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst (SLLconst x [c]) [d])
+ // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFXU [(d-c)|(32-d)<<8] x)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFXU)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB (MOVWconst [c]) x)
+ // result: (RSBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // result: (RSBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // result: (RSBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // result: (RSBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // result: (RSBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // result: (RSBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // result: (RSBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB a (MUL x y))
+ // cond: objabi.GOARM == 7
+ // result: (MULS x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMUL {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(objabi.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBD a (MULD x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULSD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBD a (NMULD x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULAD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBF a (MULF x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULSF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBF a (NMULF x y))
+ // cond: a.Uses == 1 && objabi.GOARM >= 6
+ // result: (MULAF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBS x (MOVWconst [c]))
+ // result: (SUBSconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (SLLconst [c] y))
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // result: (RSBSshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // result: (RSBSshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // result: (RSBSshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off2-off1] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off2 - off1)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // result: (RSBconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TEQ x (MOVWconst [c]))
+ // result: (TEQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLLconst [c] y))
+ // result: (TEQshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRLconst [c] y))
+ // result: (TEQshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRAconst [c] y))
+ // result: (TEQshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLL y z))
+ // result: (TEQshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRL y z))
+ // result: (TEQshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRA y z))
+ // result: (TEQshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TEQconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x^y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x ^ y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRA (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRA x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRAreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVWconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLLconst [c] y))
+ // result: (TSTshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRLconst [c] y))
+ // result: (TSTshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRAconst [c] y))
+ // result: (TSTshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLL y z))
+ // result: (TSTshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRL y z))
+ // result: (TSTshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRA y z))
+ // result: (TSTshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRAreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // result: (XORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // result: (XORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // result: (XORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRRconst [c] y))
+ // result: (XORshiftRR x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRRconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRR)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLL y z))
+ // result: (XORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRL y z))
+ // result: (XORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRA y z))
+ // result: (XORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // result: (XORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: objabi.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRR (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRRconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRR x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARMSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueARM_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BitLen32 <t> x)
+ // result: (RSBconst [32] (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpBswap32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Bswap32 <t> x)
+ // cond: objabi.GOARM==5
+ // result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM == 5) {
+ break
+ }
+ v.reset(OpARMXOR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpARMBICconst, t)
+ v1.AuxInt = int32ToAuxInt(0xff0000)
+ v2 := b.NewValue0(v.Pos, OpARMXOR, t)
+ v3 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg(x)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Bswap32 x)
+ // cond: objabi.GOARM>=6
+ // result: (REV x)
+ for {
+ x := v_0
+ if !(objabi.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVWconst [b2i32(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(b))
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueARM_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // cond: objabi.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x10000)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz16 <t> x)
+ // cond: objabi.GOARM==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // cond: objabi.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, t)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, t)
+ v3 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 <t> x)
+ // cond: objabi.GOARM==7
+ // result: (CLZ <t> (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // cond: objabi.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x100)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz8 <t> x)
+ // cond: objabi.GOARM==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(objabi.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
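+// Signed 32-bit division is not lowered to a single instruction here; Div32 and
+// Mod32 go through the runtime's unsigned divide helper (CALLudiv). Each operand
+// is made non-negative branch-free as (x XOR Signmask(x)) - Signmask(x), where
+// Signmask(x) = x>>31 is 0 or -1, and the sign is restored with the same XOR/SUB
+// trick afterwards: the quotient takes the sign of x^y, the remainder (Mod32
+// below) takes the sign of x.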
+func rewriteValueARM_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v10 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v10.AddArg2(x, y)
+ v9.AddArg(v10)
+ v0.AddArg2(v1, v9)
+ v.AddArg2(v0, v9)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMULAD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARMFMULAD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
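+// Comparisons on sub-word integers are widened to 32 bits (SignExt/ZeroExt)
+// before the CMP. The floating-point Less/Leq rules swap the operands and use
+// GreaterThan/GreaterEqual on (CMPF y x) / (CMPD y x), presumably so that an
+// unordered (NaN) comparison falls out as false under ARM's condition codes.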
+func rewriteValueARM_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
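+// Load dispatches on the value's type: booleans and unsigned 8/16-bit integers
+// use the zero-extending MOVBUload/MOVHUload, signed ones the sign-extending
+// MOVBload/MOVHload, 32-bit integers and pointers MOVWload, and 32/64-bit
+// floats MOVFload/MOVDload.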
+func rewriteValueARM_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
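+// Variable shifts: ARM register-specified shifts use only the bottom byte of
+// the count, and LSL/LSR by 32..255 already produce 0, so only counts >= 256
+// need special handling. The 16- and 32-bit count forms therefore wrap the
+// shift in CMOVWHSconst [0] guarded by (CMPconst [256] count) to force the
+// Go-specified zero result, while 8-bit counts (at most 255) need no guard.
+// Shifts by a 64-bit constant are resolved at compile time instead.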
+func rewriteValueARM_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v5)
+ v.AddArg2(v0, v5)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
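+// Move (a memmove of statically known size) is lowered by size and alignment:
+// sizes up to 4 bytes expand into individual byte/halfword/word load-store
+// pairs, word-aligned copies of 8..512 bytes jump into the Duff's-device copy
+// routine (DUFFCOPY, with AuxInt 8*(128-s/4) as the offset of the entry point),
+// and everything else falls back to the generic LoweredMove loop.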
+func rewriteValueARM_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (128 - s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)
+ // result: (LoweredMove [t.Alignment()] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg16 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg8 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
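+// ARM only has a rotate-right form, so RotateLeft32 by a constant c becomes
+// SRRconst by (-c)&31, and the variable form rotates right by the negated
+// count (RSBconst [0] y). The 8- and 16-bit rotates have no native support and
+// are re-expressed as a pair of shifts OR'd together.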
+func rewriteValueARM_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x (MOVWconst [c]))
+ // result: (SRRconst [-c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(-c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RotateLeft32 x y)
+ // result: (SRR x (RSBconst [0] <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRR)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
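+// Unsigned right shifts follow the same CMOVWHSconst/CMPconst [256] pattern as
+// the left shifts above, with sub-word operands zero-extended first. Signed
+// right shifts use SRAcond, which in effect substitutes a shift by 31 (sign
+// fill) when the count is out of range; the constant-count sub-word forms
+// first shift the operand to the top of the word so the fill bits come out
+// right.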
+func rewriteValueARM_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(v0)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(256)
+ v0.AddArg(y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
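+// Select0 and Select1 pick the quotient and remainder out of CALLudiv, the
+// runtime helper used for unsigned 32-bit division on ARM. The rules below
+// eliminate the call when the divisor is 1, a power of two, or when both
+// operands are constants.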
+func rewriteValueARM_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (CALLudiv x (MOVWconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (SRLconst [int32(log32(c))] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (CALLudiv _ (MOVWconst [1])))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Select1 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
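+// Signmask x becomes an arithmetic shift right by 31: 0 for non-negative x,
+// -1 for negative x.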
+func rewriteValueARM_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
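+// Slicemask x becomes (0 - x) >> 31 (arithmetic): -1 when the length x is
+// positive, 0 when it is zero.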
+func rewriteValueARM_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (RSBconst <t> [0] x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
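+// Store is lowered to the ARM store op selected by the value's size, with
+// 4- and 8-byte floating-point values going to MOVFstore and MOVDstore.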
+func rewriteValueARM_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
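+// Zero is lowered in three tiers: small sizes become individual byte, half and
+// word stores chosen by alignment; word-aligned sizes of at most 512 bytes
+// jump into the Duff's-device zeroing routine; everything else falls back to
+// the LoweredZero loop.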
+func rewriteValueARM_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(ptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
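+// Zeromask x becomes ((x >>u 1) - x) >> 31 (arithmetic): the subtraction is
+// negative exactly when x is non-zero, so the result is all ones for non-zero
+// x and zero otherwise.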
+func rewriteValueARM_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+}
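+// rewriteBlockARM rewrites the control values of conditional blocks. For each
+// block kind it folds FlagConstant controls into an unconditional First block,
+// flips the branch sense for InvertFlags, and, when the compared value has a
+// single use, fuses a comparison against zero with the ALU op that produced it
+// (SUB/ADD/AND/XOR and their shifted forms) into the corresponding
+// flag-setting op (CMP/CMN/TST/TEQ).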
+func rewriteBlockARM(b *Block) bool {
+ switch b.Kind {
+ case BlockARMEQ:
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cmp)
+ return true
+ }
+ // match: (EQ (CMP x (RSBconst [0] y)))
+ // result: (EQ (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMN x (RSBconst [0] y)))
+ // result: (EQ (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
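+	// The GE rules mirror the EQ rules above, except that fused comparisons
+	// retarget the block to GEnoov: the flag-setting ALU op does not leave a
+	// meaningful overflow flag for the original compare with zero, so the
+	// signed condition must be evaluated without honoring V.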
+ case BlockARMGE:
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cmp)
+ return true
+ }
+ // match: (GE (CMP x (RSBconst [0] y)))
+ // result: (GE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGE, v0)
+ return true
+ }
+ // match: (GE (CMN x (RSBconst [0] y)))
+ // result: (GE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
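+	// GEnoov blocks only arise from the fusions above, so they need just the
+	// flag-constant folds and the InvertFlags flip to LEnoov.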
+ case BlockARMGEnoov:
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLEnoov, cmp)
+ return true
+ }
+ case BlockARMGT:
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cmp)
+ return true
+ }
+ // match: (GT (CMP x (RSBconst [0] y)))
+ // result: (GT (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGT, v0)
+ return true
+ }
+ // match: (GT (CMN x (RSBconst [0] y)))
+ // result: (GT (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ case BlockARMGTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLTnoov, cmp)
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMNotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMLessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMLE:
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cmp)
+ return true
+ }
+ // match: (LE (CMP x (RSBconst [0] y)))
+ // result: (LE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLE, v0)
+ return true
+ }
+ // match: (LE (CMN x (RSBconst [0] y)))
+ // result: (LE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ case BlockARMLEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGEnoov, cmp)
+ return true
+ }
+ case BlockARMLT:
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cmp)
+ return true
+ }
+ // match: (LT (CMP x (RSBconst [0] y)))
+ // result: (LT (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLT, v0)
+ return true
+ }
+ // match: (LT (CMN x (RSBconst [0] y)))
+ // result: (LT (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ case BlockARMLTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGTnoov, cmp)
+ return true
+ }
+ case BlockARMNE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMNotEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cmp)
+ return true
+ }
+ // match: (NE (CMP x (RSBconst [0] y)))
+ // result: (NE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMN x (RSBconst [0] y)))
+ // result: (NE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMUGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cmp)
+ return true
+ }
+ case BlockARMUGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cmp)
+ return true
+ }
+ case BlockARMULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cmp)
+ return true
+ }
+ case BlockARMULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cmp)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
new file mode 100644
index 0000000..e61d899
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -0,0 +1,28662 @@
+// Code generated from gen/ARM64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueARM64(v *Value) bool {
+ switch v.Op {
+ case OpARM64ADCSflags:
+ return rewriteValueARM64_OpARM64ADCSflags(v)
+ case OpARM64ADD:
+ return rewriteValueARM64_OpARM64ADD(v)
+ case OpARM64ADDconst:
+ return rewriteValueARM64_OpARM64ADDconst(v)
+ case OpARM64ADDshiftLL:
+ return rewriteValueARM64_OpARM64ADDshiftLL(v)
+ case OpARM64ADDshiftRA:
+ return rewriteValueARM64_OpARM64ADDshiftRA(v)
+ case OpARM64ADDshiftRL:
+ return rewriteValueARM64_OpARM64ADDshiftRL(v)
+ case OpARM64AND:
+ return rewriteValueARM64_OpARM64AND(v)
+ case OpARM64ANDconst:
+ return rewriteValueARM64_OpARM64ANDconst(v)
+ case OpARM64ANDshiftLL:
+ return rewriteValueARM64_OpARM64ANDshiftLL(v)
+ case OpARM64ANDshiftRA:
+ return rewriteValueARM64_OpARM64ANDshiftRA(v)
+ case OpARM64ANDshiftRL:
+ return rewriteValueARM64_OpARM64ANDshiftRL(v)
+ case OpARM64BIC:
+ return rewriteValueARM64_OpARM64BIC(v)
+ case OpARM64BICshiftLL:
+ return rewriteValueARM64_OpARM64BICshiftLL(v)
+ case OpARM64BICshiftRA:
+ return rewriteValueARM64_OpARM64BICshiftRA(v)
+ case OpARM64BICshiftRL:
+ return rewriteValueARM64_OpARM64BICshiftRL(v)
+ case OpARM64CMN:
+ return rewriteValueARM64_OpARM64CMN(v)
+ case OpARM64CMNW:
+ return rewriteValueARM64_OpARM64CMNW(v)
+ case OpARM64CMNWconst:
+ return rewriteValueARM64_OpARM64CMNWconst(v)
+ case OpARM64CMNconst:
+ return rewriteValueARM64_OpARM64CMNconst(v)
+ case OpARM64CMNshiftLL:
+ return rewriteValueARM64_OpARM64CMNshiftLL(v)
+ case OpARM64CMNshiftRA:
+ return rewriteValueARM64_OpARM64CMNshiftRA(v)
+ case OpARM64CMNshiftRL:
+ return rewriteValueARM64_OpARM64CMNshiftRL(v)
+ case OpARM64CMP:
+ return rewriteValueARM64_OpARM64CMP(v)
+ case OpARM64CMPW:
+ return rewriteValueARM64_OpARM64CMPW(v)
+ case OpARM64CMPWconst:
+ return rewriteValueARM64_OpARM64CMPWconst(v)
+ case OpARM64CMPconst:
+ return rewriteValueARM64_OpARM64CMPconst(v)
+ case OpARM64CMPshiftLL:
+ return rewriteValueARM64_OpARM64CMPshiftLL(v)
+ case OpARM64CMPshiftRA:
+ return rewriteValueARM64_OpARM64CMPshiftRA(v)
+ case OpARM64CMPshiftRL:
+ return rewriteValueARM64_OpARM64CMPshiftRL(v)
+ case OpARM64CSEL:
+ return rewriteValueARM64_OpARM64CSEL(v)
+ case OpARM64CSEL0:
+ return rewriteValueARM64_OpARM64CSEL0(v)
+ case OpARM64DIV:
+ return rewriteValueARM64_OpARM64DIV(v)
+ case OpARM64DIVW:
+ return rewriteValueARM64_OpARM64DIVW(v)
+ case OpARM64EON:
+ return rewriteValueARM64_OpARM64EON(v)
+ case OpARM64EONshiftLL:
+ return rewriteValueARM64_OpARM64EONshiftLL(v)
+ case OpARM64EONshiftRA:
+ return rewriteValueARM64_OpARM64EONshiftRA(v)
+ case OpARM64EONshiftRL:
+ return rewriteValueARM64_OpARM64EONshiftRL(v)
+ case OpARM64Equal:
+ return rewriteValueARM64_OpARM64Equal(v)
+ case OpARM64FADDD:
+ return rewriteValueARM64_OpARM64FADDD(v)
+ case OpARM64FADDS:
+ return rewriteValueARM64_OpARM64FADDS(v)
+ case OpARM64FCMPD:
+ return rewriteValueARM64_OpARM64FCMPD(v)
+ case OpARM64FCMPS:
+ return rewriteValueARM64_OpARM64FCMPS(v)
+ case OpARM64FMOVDfpgp:
+ return rewriteValueARM64_OpARM64FMOVDfpgp(v)
+ case OpARM64FMOVDgpfp:
+ return rewriteValueARM64_OpARM64FMOVDgpfp(v)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v)
+ case OpARM64FMOVDloadidx:
+ return rewriteValueARM64_OpARM64FMOVDloadidx(v)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v)
+ case OpARM64FMOVDstoreidx:
+ return rewriteValueARM64_OpARM64FMOVDstoreidx(v)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v)
+ case OpARM64FMOVSloadidx:
+ return rewriteValueARM64_OpARM64FMOVSloadidx(v)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v)
+ case OpARM64FMOVSstoreidx:
+ return rewriteValueARM64_OpARM64FMOVSstoreidx(v)
+ case OpARM64FMULD:
+ return rewriteValueARM64_OpARM64FMULD(v)
+ case OpARM64FMULS:
+ return rewriteValueARM64_OpARM64FMULS(v)
+ case OpARM64FNEGD:
+ return rewriteValueARM64_OpARM64FNEGD(v)
+ case OpARM64FNEGS:
+ return rewriteValueARM64_OpARM64FNEGS(v)
+ case OpARM64FNMULD:
+ return rewriteValueARM64_OpARM64FNMULD(v)
+ case OpARM64FNMULS:
+ return rewriteValueARM64_OpARM64FNMULS(v)
+ case OpARM64FSUBD:
+ return rewriteValueARM64_OpARM64FSUBD(v)
+ case OpARM64FSUBS:
+ return rewriteValueARM64_OpARM64FSUBS(v)
+ case OpARM64GreaterEqual:
+ return rewriteValueARM64_OpARM64GreaterEqual(v)
+ case OpARM64GreaterEqualF:
+ return rewriteValueARM64_OpARM64GreaterEqualF(v)
+ case OpARM64GreaterEqualU:
+ return rewriteValueARM64_OpARM64GreaterEqualU(v)
+ case OpARM64GreaterThan:
+ return rewriteValueARM64_OpARM64GreaterThan(v)
+ case OpARM64GreaterThanF:
+ return rewriteValueARM64_OpARM64GreaterThanF(v)
+ case OpARM64GreaterThanU:
+ return rewriteValueARM64_OpARM64GreaterThanU(v)
+ case OpARM64LessEqual:
+ return rewriteValueARM64_OpARM64LessEqual(v)
+ case OpARM64LessEqualF:
+ return rewriteValueARM64_OpARM64LessEqualF(v)
+ case OpARM64LessEqualU:
+ return rewriteValueARM64_OpARM64LessEqualU(v)
+ case OpARM64LessThan:
+ return rewriteValueARM64_OpARM64LessThan(v)
+ case OpARM64LessThanF:
+ return rewriteValueARM64_OpARM64LessThanF(v)
+ case OpARM64LessThanU:
+ return rewriteValueARM64_OpARM64LessThanU(v)
+ case OpARM64MADD:
+ return rewriteValueARM64_OpARM64MADD(v)
+ case OpARM64MADDW:
+ return rewriteValueARM64_OpARM64MADDW(v)
+ case OpARM64MNEG:
+ return rewriteValueARM64_OpARM64MNEG(v)
+ case OpARM64MNEGW:
+ return rewriteValueARM64_OpARM64MNEGW(v)
+ case OpARM64MOD:
+ return rewriteValueARM64_OpARM64MOD(v)
+ case OpARM64MODW:
+ return rewriteValueARM64_OpARM64MODW(v)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v)
+ case OpARM64MOVBUloadidx:
+ return rewriteValueARM64_OpARM64MOVBUloadidx(v)
+ case OpARM64MOVBUreg:
+ return rewriteValueARM64_OpARM64MOVBUreg(v)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v)
+ case OpARM64MOVBloadidx:
+ return rewriteValueARM64_OpARM64MOVBloadidx(v)
+ case OpARM64MOVBreg:
+ return rewriteValueARM64_OpARM64MOVBreg(v)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v)
+ case OpARM64MOVBstoreidx:
+ return rewriteValueARM64_OpARM64MOVBstoreidx(v)
+ case OpARM64MOVBstorezero:
+ return rewriteValueARM64_OpARM64MOVBstorezero(v)
+ case OpARM64MOVBstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVBstorezeroidx(v)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v)
+ case OpARM64MOVDloadidx:
+ return rewriteValueARM64_OpARM64MOVDloadidx(v)
+ case OpARM64MOVDloadidx8:
+ return rewriteValueARM64_OpARM64MOVDloadidx8(v)
+ case OpARM64MOVDreg:
+ return rewriteValueARM64_OpARM64MOVDreg(v)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v)
+ case OpARM64MOVDstoreidx:
+ return rewriteValueARM64_OpARM64MOVDstoreidx(v)
+ case OpARM64MOVDstoreidx8:
+ return rewriteValueARM64_OpARM64MOVDstoreidx8(v)
+ case OpARM64MOVDstorezero:
+ return rewriteValueARM64_OpARM64MOVDstorezero(v)
+ case OpARM64MOVDstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx(v)
+ case OpARM64MOVDstorezeroidx8:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx8(v)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v)
+ case OpARM64MOVHUloadidx:
+ return rewriteValueARM64_OpARM64MOVHUloadidx(v)
+ case OpARM64MOVHUloadidx2:
+ return rewriteValueARM64_OpARM64MOVHUloadidx2(v)
+ case OpARM64MOVHUreg:
+ return rewriteValueARM64_OpARM64MOVHUreg(v)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v)
+ case OpARM64MOVHloadidx:
+ return rewriteValueARM64_OpARM64MOVHloadidx(v)
+ case OpARM64MOVHloadidx2:
+ return rewriteValueARM64_OpARM64MOVHloadidx2(v)
+ case OpARM64MOVHreg:
+ return rewriteValueARM64_OpARM64MOVHreg(v)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v)
+ case OpARM64MOVHstoreidx:
+ return rewriteValueARM64_OpARM64MOVHstoreidx(v)
+ case OpARM64MOVHstoreidx2:
+ return rewriteValueARM64_OpARM64MOVHstoreidx2(v)
+ case OpARM64MOVHstorezero:
+ return rewriteValueARM64_OpARM64MOVHstorezero(v)
+ case OpARM64MOVHstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx(v)
+ case OpARM64MOVHstorezeroidx2:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx2(v)
+ case OpARM64MOVQstorezero:
+ return rewriteValueARM64_OpARM64MOVQstorezero(v)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v)
+ case OpARM64MOVWUloadidx:
+ return rewriteValueARM64_OpARM64MOVWUloadidx(v)
+ case OpARM64MOVWUloadidx4:
+ return rewriteValueARM64_OpARM64MOVWUloadidx4(v)
+ case OpARM64MOVWUreg:
+ return rewriteValueARM64_OpARM64MOVWUreg(v)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v)
+ case OpARM64MOVWloadidx:
+ return rewriteValueARM64_OpARM64MOVWloadidx(v)
+ case OpARM64MOVWloadidx4:
+ return rewriteValueARM64_OpARM64MOVWloadidx4(v)
+ case OpARM64MOVWreg:
+ return rewriteValueARM64_OpARM64MOVWreg(v)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v)
+ case OpARM64MOVWstoreidx:
+ return rewriteValueARM64_OpARM64MOVWstoreidx(v)
+ case OpARM64MOVWstoreidx4:
+ return rewriteValueARM64_OpARM64MOVWstoreidx4(v)
+ case OpARM64MOVWstorezero:
+ return rewriteValueARM64_OpARM64MOVWstorezero(v)
+ case OpARM64MOVWstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx(v)
+ case OpARM64MOVWstorezeroidx4:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx4(v)
+ case OpARM64MSUB:
+ return rewriteValueARM64_OpARM64MSUB(v)
+ case OpARM64MSUBW:
+ return rewriteValueARM64_OpARM64MSUBW(v)
+ case OpARM64MUL:
+ return rewriteValueARM64_OpARM64MUL(v)
+ case OpARM64MULW:
+ return rewriteValueARM64_OpARM64MULW(v)
+ case OpARM64MVN:
+ return rewriteValueARM64_OpARM64MVN(v)
+ case OpARM64MVNshiftLL:
+ return rewriteValueARM64_OpARM64MVNshiftLL(v)
+ case OpARM64MVNshiftRA:
+ return rewriteValueARM64_OpARM64MVNshiftRA(v)
+ case OpARM64MVNshiftRL:
+ return rewriteValueARM64_OpARM64MVNshiftRL(v)
+ case OpARM64NEG:
+ return rewriteValueARM64_OpARM64NEG(v)
+ case OpARM64NEGshiftLL:
+ return rewriteValueARM64_OpARM64NEGshiftLL(v)
+ case OpARM64NEGshiftRA:
+ return rewriteValueARM64_OpARM64NEGshiftRA(v)
+ case OpARM64NEGshiftRL:
+ return rewriteValueARM64_OpARM64NEGshiftRL(v)
+ case OpARM64NotEqual:
+ return rewriteValueARM64_OpARM64NotEqual(v)
+ case OpARM64OR:
+ return rewriteValueARM64_OpARM64OR(v)
+ case OpARM64ORN:
+ return rewriteValueARM64_OpARM64ORN(v)
+ case OpARM64ORNshiftLL:
+ return rewriteValueARM64_OpARM64ORNshiftLL(v)
+ case OpARM64ORNshiftRA:
+ return rewriteValueARM64_OpARM64ORNshiftRA(v)
+ case OpARM64ORNshiftRL:
+ return rewriteValueARM64_OpARM64ORNshiftRL(v)
+ case OpARM64ORconst:
+ return rewriteValueARM64_OpARM64ORconst(v)
+ case OpARM64ORshiftLL:
+ return rewriteValueARM64_OpARM64ORshiftLL(v)
+ case OpARM64ORshiftRA:
+ return rewriteValueARM64_OpARM64ORshiftRA(v)
+ case OpARM64ORshiftRL:
+ return rewriteValueARM64_OpARM64ORshiftRL(v)
+ case OpARM64RORWconst:
+ return rewriteValueARM64_OpARM64RORWconst(v)
+ case OpARM64RORconst:
+ return rewriteValueARM64_OpARM64RORconst(v)
+ case OpARM64SBCSflags:
+ return rewriteValueARM64_OpARM64SBCSflags(v)
+ case OpARM64SLL:
+ return rewriteValueARM64_OpARM64SLL(v)
+ case OpARM64SLLconst:
+ return rewriteValueARM64_OpARM64SLLconst(v)
+ case OpARM64SRA:
+ return rewriteValueARM64_OpARM64SRA(v)
+ case OpARM64SRAconst:
+ return rewriteValueARM64_OpARM64SRAconst(v)
+ case OpARM64SRL:
+ return rewriteValueARM64_OpARM64SRL(v)
+ case OpARM64SRLconst:
+ return rewriteValueARM64_OpARM64SRLconst(v)
+ case OpARM64STP:
+ return rewriteValueARM64_OpARM64STP(v)
+ case OpARM64SUB:
+ return rewriteValueARM64_OpARM64SUB(v)
+ case OpARM64SUBconst:
+ return rewriteValueARM64_OpARM64SUBconst(v)
+ case OpARM64SUBshiftLL:
+ return rewriteValueARM64_OpARM64SUBshiftLL(v)
+ case OpARM64SUBshiftRA:
+ return rewriteValueARM64_OpARM64SUBshiftRA(v)
+ case OpARM64SUBshiftRL:
+ return rewriteValueARM64_OpARM64SUBshiftRL(v)
+ case OpARM64TST:
+ return rewriteValueARM64_OpARM64TST(v)
+ case OpARM64TSTW:
+ return rewriteValueARM64_OpARM64TSTW(v)
+ case OpARM64TSTWconst:
+ return rewriteValueARM64_OpARM64TSTWconst(v)
+ case OpARM64TSTconst:
+ return rewriteValueARM64_OpARM64TSTconst(v)
+ case OpARM64TSTshiftLL:
+ return rewriteValueARM64_OpARM64TSTshiftLL(v)
+ case OpARM64TSTshiftRA:
+ return rewriteValueARM64_OpARM64TSTshiftRA(v)
+ case OpARM64TSTshiftRL:
+ return rewriteValueARM64_OpARM64TSTshiftRL(v)
+ case OpARM64UBFIZ:
+ return rewriteValueARM64_OpARM64UBFIZ(v)
+ case OpARM64UBFX:
+ return rewriteValueARM64_OpARM64UBFX(v)
+ case OpARM64UDIV:
+ return rewriteValueARM64_OpARM64UDIV(v)
+ case OpARM64UDIVW:
+ return rewriteValueARM64_OpARM64UDIVW(v)
+ case OpARM64UMOD:
+ return rewriteValueARM64_OpARM64UMOD(v)
+ case OpARM64UMODW:
+ return rewriteValueARM64_OpARM64UMODW(v)
+ case OpARM64XOR:
+ return rewriteValueARM64_OpARM64XOR(v)
+ case OpARM64XORconst:
+ return rewriteValueARM64_OpARM64XORconst(v)
+ case OpARM64XORshiftLL:
+ return rewriteValueARM64_OpARM64XORshiftLL(v)
+ case OpARM64XORshiftRA:
+ return rewriteValueARM64_OpARM64XORshiftRA(v)
+ case OpARM64XORshiftRL:
+ return rewriteValueARM64_OpARM64XORshiftRL(v)
+ case OpAbs:
+ v.Op = OpARM64FABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARM64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpARM64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddr:
+ return rewriteValueARM64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd32:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd64:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd8:
+ v.Op = OpARM64AND
+ return true
+ case OpAndB:
+ v.Op = OpARM64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpARM64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd32Variant:
+ v.Op = OpARM64LoweredAtomicAdd32Variant
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpARM64LoweredAtomicAdd64
+ return true
+ case OpAtomicAdd64Variant:
+ v.Op = OpARM64LoweredAtomicAdd64Variant
+ return true
+ case OpAtomicAnd32:
+ return rewriteValueARM64_OpAtomicAnd32(v)
+ case OpAtomicAnd32Variant:
+ return rewriteValueARM64_OpAtomicAnd32Variant(v)
+ case OpAtomicAnd8:
+ return rewriteValueARM64_OpAtomicAnd8(v)
+ case OpAtomicAnd8Variant:
+ return rewriteValueARM64_OpAtomicAnd8Variant(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpARM64LoweredAtomicCas32
+ return true
+ case OpAtomicCompareAndSwap32Variant:
+ v.Op = OpARM64LoweredAtomicCas32Variant
+ return true
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpARM64LoweredAtomicCas64
+ return true
+ case OpAtomicCompareAndSwap64Variant:
+ v.Op = OpARM64LoweredAtomicCas64Variant
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpARM64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange32Variant:
+ v.Op = OpARM64LoweredAtomicExchange32Variant
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpARM64LoweredAtomicExchange64
+ return true
+ case OpAtomicExchange64Variant:
+ v.Op = OpARM64LoweredAtomicExchange64Variant
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpARM64LDARW
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpARM64LDARB
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicOr32:
+ return rewriteValueARM64_OpAtomicOr32(v)
+ case OpAtomicOr32Variant:
+ return rewriteValueARM64_OpAtomicOr32Variant(v)
+ case OpAtomicOr8:
+ return rewriteValueARM64_OpAtomicOr8(v)
+ case OpAtomicOr8Variant:
+ return rewriteValueARM64_OpAtomicOr8Variant(v)
+ case OpAtomicStore32:
+ v.Op = OpARM64STLRW
+ return true
+ case OpAtomicStore64:
+ v.Op = OpARM64STLR
+ return true
+ case OpAtomicStore8:
+ v.Op = OpARM64STLRB
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpARM64STLR
+ return true
+ case OpAvg64u:
+ return rewriteValueARM64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValueARM64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueARM64_OpBitLen64(v)
+ case OpBitRev16:
+ return rewriteValueARM64_OpBitRev16(v)
+ case OpBitRev32:
+ v.Op = OpARM64RBITW
+ return true
+ case OpBitRev64:
+ v.Op = OpARM64RBIT
+ return true
+ case OpBitRev8:
+ return rewriteValueARM64_OpBitRev8(v)
+ case OpBswap32:
+ v.Op = OpARM64REVW
+ return true
+ case OpBswap64:
+ v.Op = OpARM64REV
+ return true
+ case OpCeil:
+ v.Op = OpARM64FRINTPD
+ return true
+ case OpClosureCall:
+ v.Op = OpARM64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom32:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom64:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom8:
+ v.Op = OpARM64MVN
+ return true
+ case OpCondSelect:
+ return rewriteValueARM64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueARM64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueARM64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueARM64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM64_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueARM64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValueARM64_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARM64FCVTZSSW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARM64FCVTZUSW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpARM64FCVTZSS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARM64FCVTSD
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpARM64FCVTZUS
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARM64UCVTFWS
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARM64UCVTFWD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARM64SCVTFWS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARM64SCVTFWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARM64FCVTZSDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARM64FCVTDS
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARM64FCVTZUDW
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpARM64FCVTZSD
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpARM64FCVTZUD
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpARM64UCVTFS
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpARM64UCVTFD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpARM64SCVTFS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpARM64SCVTFD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARM64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpARM64UDIVW
+ return true
+ case OpDiv64:
+ return rewriteValueARM64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpARM64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpARM64UDIV
+ return true
+ case OpDiv8:
+ return rewriteValueARM64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueARM64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueARM64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM64_OpFMA(v)
+ case OpFloor:
+ v.Op = OpARM64FRINTMD
+ return true
+ case OpGetCallerPC:
+ v.Op = OpARM64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARM64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARM64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueARM64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueARM64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpARM64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpARM64UMULH
+ return true
+ case OpInterCall:
+ v.Op = OpARM64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueARM64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueARM64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueARM64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueARM64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueARM64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueARM64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueARM64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueARM64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueARM64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueARM64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueARM64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueARM64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueARM64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpARM64UMODW
+ return true
+ case OpMod64:
+ return rewriteValueARM64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpARM64UMOD
+ return true
+ case OpMod8:
+ return rewriteValueARM64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM64_OpMove(v)
+ case OpMul16:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpARM64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpARM64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpARM64FMULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpARM64LoweredMuluhilo
+ return true
+ case OpMul8:
+ v.Op = OpARM64MULW
+ return true
+ case OpNeg16:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpARM64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpARM64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueARM64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueARM64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueARM64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARM64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARM64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARM64OR
+ return true
+ case OpOr32:
+ v.Op = OpARM64OR
+ return true
+ case OpOr64:
+ v.Op = OpARM64OR
+ return true
+ case OpOr8:
+ v.Op = OpARM64OR
+ return true
+ case OpOrB:
+ v.Op = OpARM64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueARM64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueARM64_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueARM64_OpPopCount64(v)
+ case OpRotateLeft16:
+ return rewriteValueARM64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueARM64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueARM64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpARM64FRINTAD
+ return true
+ case OpRound32F:
+ v.Op = OpARM64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpARM64LoweredRound64F
+ return true
+ case OpRoundToEven:
+ v.Op = OpARM64FRINTND
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueARM64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueARM64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueARM64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueARM64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueARM64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueARM64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueARM64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueARM64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpARM64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueARM64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARM64FSQRTD
+ return true
+ case OpStaticCall:
+ v.Op = OpARM64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM64_OpStore(v)
+ case OpSub16:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpARM64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpARM64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARM64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARM64SUB
+ return true
+ case OpTrunc:
+ v.Op = OpARM64FRINTZD
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARM64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor32:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor64:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor8:
+ v.Op = OpARM64XOR
+ return true
+ case OpZero:
+ return rewriteValueARM64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpARM64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpARM64MOVBUreg
+ return true
+ }
+ return false
+}
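+// Generic ops with a direct ARM64 equivalent are lowered in the switch above
+// simply by retargeting v.Op; the remaining ops dispatch to the per-op
+// helpers below. Each rewriteValueARM64_Op* helper is machine-generated from
+// the ARM64 SSA rewrite rules: it tries its "match"/"cond"/"result" rules in
+// order, rewriting v in place and returning true when a rule fires, and
+// returning false when no rule applies.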
+func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c))))
+ // result: (ADCSflags x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64ADCzerocarry || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ c := v_2_0_0.Args[0]
+ v.reset(OpARM64ADCSflags)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0]))))
+ // result: (ADDSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64ADDSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
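+ // ADD is commutative: each _i0 loop below retries its rule with v_0 and
+ // v_1 swapped, so a pattern can match with its operands in either order.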
+ // match: (ADD x (MOVDconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MULW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEGW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64NEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVDaddr [int32(off1)+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVDconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL [c] (SLLconst x [64-c]) x)
+ // result: (RORconst [ c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVDconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64BIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWUreg x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHUreg x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBUreg x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVDconst [c]))
+ // result: (ANDconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (BIC x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVDconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMNW x (MOVDconst [c]))
+ // result: (CMNWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(int32(x), y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags64(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVDconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
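+ // The rules below canonicalize CMP: a constant, a shifted operand, or the
+ // higher-ID value on the left is swapped to the right, and the result is
+ // wrapped in InvertFlags so the condition still reads the same way.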
+ // match: (CMP x (MOVDconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SLLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftLL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRAconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRA x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst [int32(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst [int32(c)] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(int32(x), y))
+ return true
+ }
+ // match: (CMPWconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPWconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVWUreg _) [c])
+ // cond: 0xffffffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVDconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
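+ // CSEL [cc] x y flags selects x or y based on cc and flags. The rules below
+ // canonicalize a zero operand to CSEL0, fold InvertFlags into the condition,
+ // and resolve the select outright when the flags are a known constant.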
+ // match: (CSEL [cc] x (MOVDconst [0]) flag)
+ // result: (CSEL0 [cc] x flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg2(x, flag)
+ return true
+ }
+ // match: (CSEL [cc] (MOVDconst [0]) y flag)
+ // result: (CSEL0 [arm64Negate(cc)] y flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg2(y, flag)
+ return true
+ }
+ // match: (CSEL [cc] x y (InvertFlags cmp))
+ // result: (CSEL [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSEL [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: y
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL0(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSEL0 [cc] x (InvertFlags cmp))
+ // result: (CSEL0 [arm64Invert(cc)] x cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_1.Args[0]
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg2(x, cmp)
+ return true
+ }
+ // match: (CSEL0 [cc] x flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL0 [cc] _ flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (MOVDconst [0])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL0 [boolval.Op] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c/d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(int32(c)/int32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) / int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EON(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EON x (MOVDconst [c]))
+ // result: (XORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (EON x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (EON x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.eq())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64Equal)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDD a (FMULD x y))
+ // result: (FMADDD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDD a (FNMULD x y))
+ // result: (FMSUBD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS a (FMULS x y))
+ // result: (FMADDS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDS a (FNMULS x y))
+ // result: (FMSUBS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPD x (FMOVDconst [0]))
+ // result: (FCMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPD (FMOVDconst [0]) x)
+ // result: (InvertFlags (FCMPD0 x))
+ for {
+ if v_0.Op != OpARM64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0]))
+ // result: (FCMPS0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVSconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPS0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0]) x)
+ // result: (InvertFlags (FCMPS0 x))
+ for {
+ if v_0.Op != OpARM64FMOVSconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDfpgp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDgpfp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem)
+ // result: (MOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // result: (FMOVSgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem)
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULD (FNEGD x) y)
+ // result: (FNMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULS (FNEGS x) y)
+ // result: (FNMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGD (FMULD x y))
+ // result: (FNMULD x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGD (FNMULD x y))
+ // result: (FMULD x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (FMULS x y))
+ // result: (FNMULS x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGS (FNMULS x y))
+ // result: (FMULS x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULD (FNEGD x) y)
+ // result: (FMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULS (FNEGS x) y)
+ // result: (FMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBD a (FMULD x y))
+ // result: (FMSUBD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FMULD x y) a)
+ // result: (FNMSUBD a x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD a (FNMULD x y))
+ // result: (FMADDD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FNMULD x y) a)
+ // result: (FNMADDD a x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS a (FMULS x y))
+ // result: (FMSUBS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FMULS x y) a)
+ // result: (FNMSUBS a x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS a (FNMULS x y))
+ // result: (FMADDS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FNMULS x y) a)
+ // result: (FNMADDS a x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualF (InvertFlags x))
+ // result: (LessEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.uge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.gt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanF (InvertFlags x))
+ // result: (LessThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ugt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.le())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualF (InvertFlags x))
+ // result: (GreaterEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ule())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.lt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanF (InvertFlags x))
+ // result: (GreaterThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ult())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADD a x (MOVDconst [-1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [-1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a (MOVDconst [1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MUL <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (ADDconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MULW <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (ADDconst [int64(int32(c)*int32(d))] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEG x (MOVDconst [-1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [-c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [-int64(int32(c)*int32(d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(int32(c) * int32(d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c%d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(int32(c)%int32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) % int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x)
+ // cond: x.Type.IsBoolean()
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if !(x.Type.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<8-1, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<8-1, 0)
+ // result: (UBFX [armBFAuxInt(sc, 8)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64UBFX {
+ break
+ }
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64UBFX {
+ continue
+ }
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
+ break
+ }
+ w0_0 := w0.Args[0]
+ if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
+ continue
+ }
+ w0_0 := w0.Args[0]
+ if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != i-5 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != i-6 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ break
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpARM64MOVBstore || auxIntToInt32(x6.AuxInt) != i-7 || auxToSym(x6.Aux) != s {
+ break
+ }
+ mem := x6.Args[2]
+ if ptr != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64SRLconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 6 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != 4 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != 2 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != 1 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ p1 := x5.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ continue
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x6.Args[3]
+ ptr0 := x6.Args[0]
+ idx0 := x6.Args[1]
+ x6_2 := x6.Args[2]
+ if x6_2.Op != OpARM64SRLconst || auxIntToInt64(x6_2.AuxInt) != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64UBFX || auxIntToArm64BitField(x2_1.AuxInt) != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
+ break
+ }
+ x0_1_0 := x0_1.Args[0]
+ if x0_1_0.Op != OpARM64MOVDreg || w != x0_1_0.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
+ break
+ }
+ x1_1_0 := x1_1.Args[0]
+ if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 {
+ break
+ }
+ x2_1_0 := x2_1.Args[0]
+ if x2_1_0.Op != OpARM64MOVDreg || w != x2_1_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
+ break
+ }
+ x0_1_0 := x0_1.Args[0]
+ if x0_1_0.Op != OpARM64MOVDreg || w != x0_1_0.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
+ continue
+ }
+ x1_1_0 := x1_1.Args[0]
+ if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 {
+ continue
+ }
+ x2_2_0 := x2_2.Args[0]
+ if x2_2_0.Op != OpARM64MOVDreg || w != x2_2_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 {
+ break
+ }
+ x_1_0 := x_1.Args[0]
+ if x_1_0.Op != OpARM64MOVDreg || w != x_1_0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 {
+ continue
+ }
+ x_2_0 := x_2.Args[0]
+ if x_2_0.Op != OpARM64MOVDreg || w != x_2_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ w := v_2
+ x0 := v_3
+ if x0.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x0.Args[3]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 || idx != x0_1.Args[0] {
+ break
+ }
+ x0_2 := x0.Args[2]
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ break
+ }
+ x1 := x0.Args[3]
+ if x1.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x1.Args[3]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] {
+ break
+ }
+ x1_2 := x1.Args[2]
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ break
+ }
+ x2 := x1.Args[3]
+ if x2.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x2.Args[3]
+ if ptr != x2.Args[0] || idx != x2.Args[1] {
+ break
+ }
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ w := v_2
+ x0 := v_3
+ if x0.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x0.Args[3]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 || idx != x0_1.Args[0] {
+ break
+ }
+ x0_2 := x0.Args[2]
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ break
+ }
+ x1 := x0.Args[3]
+ if x1.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x1.Args[3]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] {
+ break
+ }
+ x1_2 := x1.Args[2]
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ break
+ }
+ x2 := x1.Args[3]
+ if x2.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x2.Args[3]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] {
+ break
+ }
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ w := v_2
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ w := v_2
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64ADDconst || auxIntToInt64(x_1.AuxInt) != 1 || idx != x_1.Args[0] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),1) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVBstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 1) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVBstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDload [int32(c)<<3] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx8 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDreg (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem)
+ // result: (FMOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstore [int32(c)<<3] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),8) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVDstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 8) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVQstorezero [0] {s} p0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 8 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ if p0.Op != OpARM64ADD {
+ break
+ }
+ _ = p0.Args[1]
+ p0_0 := p0.Args[0]
+ p0_1 := p0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p0_0, p0_1 = _i0+1, p0_1, p0_0 {
+ ptr0 := p0_0
+ idx0 := p0_1
+ x := v_1
+ if x.Op != OpARM64MOVDstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVQstorezero [0] {s} p0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 8 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ if p0.Op != OpARM64ADDshiftLL || auxIntToInt64(p0.AuxInt) != 3 {
+ break
+ }
+ idx0 := p0.Args[1]
+ ptr0 := p0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVDstorezeroidx8 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstorezero [int32(c<<3)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 3))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHUload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<16-1, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<16-1, 0)
+ // result: (UBFX [armBFAuxInt(sc, 16)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<16-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (ADD idx idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (ADD idx idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 16 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVHstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstore [int32(c)<<1] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),2) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVHstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 2) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVHstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg3(ptr1, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (ADD idx idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (ADD idx idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstorezero [int32(c<<1)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 1))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _))
+ // result: (FMOVSfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWUload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<32-1, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, 1<<32-1, 0)
+ // result: (UBFX [armBFAuxInt(sc, 32)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<32-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ // match: (MOVWreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem)
+ // result: (FMOVSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstore [i-4] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx4 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstore [i-4] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx4 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 32 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVWstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstore [int32(c)<<2] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),4) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVWstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 4) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVWstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg3(ptr1, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstorezero [int32(c<<2)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUB a x (MOVDconst [-1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [-1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MNEG <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (SUBconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MNEGW <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (SUBconst [int64(int32(c)*int32(d))] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL (NEG x) y)
+ // result: (MNEG x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [-1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (ADDshiftLL x x [log64(c-1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c - 1))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MULW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULW (NEG x) y)
+ // result: (MNEGW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log64(c-1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c - 1))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [int64(int32(c)*int32(d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (XOR x y))
+ // result: (EON x y)
+ for {
+ if v_0.Op != OpARM64XOR {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (MOVDconst [c]))
+ // result: (MOVDconst [^c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MUL x y))
+ // result: (MNEG x y)
+ for {
+ if v_0.Op != OpARM64MUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (MULW x y))
+ // result: (MNEGW x y)
+ for {
+ if v_0.Op != OpARM64MULW {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [-(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ne())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OR x (MOVDconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR x (MVN y))
+ // result: (ORN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64ORN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ // result: (BFI [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFIZ {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
+ continue
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ // result: (BFXIL [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFX {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
+ continue
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 3 {
+ continue
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p1 := x2.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ ptr0 := x3.Args[0]
+ idx0 := x3.Args[1]
+ if mem != x3.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 3 {
+ continue
+ }
+ idx := x0_1.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] || idx != x3.Args[1] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i7 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i6 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i4 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 7 {
+ continue
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 6 || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 4 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 3 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 2 || auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 1 || auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ p1 := x6.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ ptr0 := x7.Args[0]
+ idx0 := x7.Args[1]
+ if mem != x7.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 7 {
+ continue
+ }
+ idx := x0_1.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ continue
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ continue
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x6.Args[2]
+ if ptr != x6.Args[0] {
+ continue
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ if ptr != x7.Args[0] || idx != x7.Args[1] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ continue
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ continue
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 4 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 5 || auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 6 || auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload || auxIntToInt32(x7.AuxInt) != 7 || auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ continue
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ continue
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x6.Args[2]
+ if ptr != x6.Args[0] {
+ continue
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ if ptr != x7.Args[0] {
+ continue
+ }
+ x7_1 := x7.Args[1]
+ if x7_1.Op != OpARM64ADDconst || auxIntToInt64(x7_1.AuxInt) != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(v.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ return false
+}
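A rough source-level illustration (editorial aside, not part of the diff) of what the byte-merging OR rules above target: building a 64-bit value from eight adjacent unsigned byte loads in big-endian order. When every intermediate value has a single use, as the cond lines require, the rules collapse the MOVBUload/ORshiftLL chain into one MOVDload or MOVDloadidx followed by a REV byte swap. The function name and slice layout are illustrative; whether a given function actually matches depends on the exact SSA form reaching this pass.

func load64BE(b []byte) uint64 {
	_ = b[7] // a single bounds-check hint keeps the eight loads adjacent
	return uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 |
		uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7])
}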
+func rewriteValueARM64_OpARM64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [c]))
+ // result: (ORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORN x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
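The ORN rules above rest on the identity x ORN y == x | ^y. A minimal arithmetic check (illustrative helper, not compiler code) of the two simplest cases: folding a constant operand into ORconst with the complemented immediate, and collapsing ORN x x into an all-ones constant.

func ornRulesAreSound(x uint64) bool {
	const c = uint64(0x00ff)
	// (ORN x (MOVDconst [c])) rewrites to (ORconst [^c] x); both compute x | ^c.
	foldOK := (x | ^c) == (x | 0xffffffffffffff00)
	// (ORN x x) rewrites to (MOVDconst [-1]) because x | ^x sets every bit.
	selfOK := (x | ^x) == ^uint64(0)
	return foldOK && selfOK
}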
+func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c1] (ANDconst [c2] x))
+ // cond: c2|c1 == ^0
+ // result: (ORconst [c1] x)
+ for {
+ c1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c2|c1 == ^0) {
+ break
+ }
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
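The final ORconst rule drops an inner ANDconst whenever c2|c1 == ^0: every bit the AND mask could clear is set again by the OR immediate, so the mask cannot affect the result. A short check of that identity (illustrative only):

func orAbsorbsAnd(x, c1, c2 uint64) bool {
	if c1|c2 != ^uint64(0) {
		return true // precondition of the rule not met; nothing to compare
	}
	// Bits inside c2 pass through the AND unchanged; bits outside c2 lie
	// inside c1 and are forced to 1 by the OR either way.
	return ((x&c2)|c1) == (x | c1)
}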
+func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
+ // cond: sc == bfc.getARM64BFwidth()
+ // result: (BFXIL [bfc] y x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
+ break
+ }
+ y := v_1.Args[0]
+ if !(sc == bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
+ // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 1 {
+ break
+ }
+ idx1 := p1.Args[1]
+ ptr1 := p1.Args[0]
+ if mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(idx0)
+ v0.AddArg3(ptr0, v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
+ // cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i4 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ break
+ }
+ i6 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ break
+ }
+ i7 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx4 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx4 {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 2 {
+ break
+ }
+ idx1 := p1.Args[1]
+ ptr1 := p1.Args[0]
+ if mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg(idx0)
+ v0.AddArg3(ptr0, v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p1 := x0.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ ptr0 := x1.Args[0]
+ idx0 := x1.Args[1]
+ if mem != x1.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUload {
+ break
+ }
+ i2 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUload || auxIntToInt32(x0.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ if mem != x2.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUload {
+ break
+ }
+ i4 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i3 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUload || auxIntToInt32(x0.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 3 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 1 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ p1 := x3.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x3.Args[1] {
+ continue
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ ptr0 := x4.Args[0]
+ idx0 := x4.Args[1]
+ if mem != x4.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x3.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 4 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] || idx != x4.Args[1] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(v.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+	// match: (ORshiftRL [c] (SLLconst x [64-c]) x)
+	// result: (RORconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+	// match: (ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ // cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ // result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
+ break
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x))
+	// cond: lc < rc && ac == ^(1<<uint(64-rc)-1)
+ // result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RORWconst [c] (RORWconst [d] x))
+ // result: (RORWconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORWconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RORconst [c] (RORconst [d] x))
+ // result: (RORconst [(c+d)&63] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt((c + d) & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo)))))
+ // result: (SBCSflags x y bo)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64NEG || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpARM64NGCzerocarry || v_2_0_0_0.Type != typ.UInt64 {
+ break
+ }
+ bo := v_2_0_0_0.Args[0]
+ v.reset(OpARM64SBCSflags)
+ v.AddArg3(x, y, bo)
+ return true
+ }
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0]))))
+ // result: (SUBSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64SUBSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVDconst [c]))
+ // result: (SLLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ // match: (SLLconst [c] (SRLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [^(1<<uint(c)-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (MOVWUreg x))
+ // cond: isARM64BFMask(sc, 1<<32-1, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, 32)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<32-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (MOVHUreg x))
+ // cond: isARM64BFMask(sc, 1<<16-1, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, 16)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<16-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (MOVBUreg x))
+ // cond: isARM64BFMask(sc, 1<<8-1, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, 8)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVDconst [c]))
+ // result: (SRAconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc <= rc
+ // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc <= rc) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVWreg x))
+ // cond: rc < 32
+ // result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVHreg x))
+ // cond: rc < 16
+ // result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVBreg x))
+ // cond: rc < 8
+ // result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVDconst [c]))
+ // result: (SRLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst [c] (SLLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [1<<uint(64-c)-1] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (MOVWUreg x))
+ // cond: isARM64BFMask(sc, 1<<32-1, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (MOVHUreg x))
+ // cond: isARM64BFMask(sc, 1<<16-1, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (MOVBUreg x))
+ // cond: isARM64BFMask(sc, 1<<8-1, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc < rc
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < rc) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFX [bfc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64STP(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+ // result: (MOVQstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 || v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MULW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEGW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB x (SUB y z))
+ // result: (SUB (ADD <v.Type> x z) y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64SUB {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (SUB (SUB x y) z)
+ // result: (SUB x (ADD <y.Type> y z))
+ for {
+ if v_0.Op != OpARM64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUB x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftLL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRA x (MOVDconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVDconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TSTW x (MOVDconst [c]))
+ // result: (TSTWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(int32(x)&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(int32(x) & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags64(x&y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags64(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVDconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFIZ [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFX [bfc] (SRLconst [sc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (UDIV x (MOVDconst [1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (UDIV x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (SRLconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMOD <typ.UInt64> x y)
+ // result: (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+ for {
+ if v.Type != typ.UInt64 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUB)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMOD _ (MOVDconst [1]))
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMOD x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMODW <typ.UInt32> x y)
+ // result: (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUBW)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMODW _ (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMODW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XOR x (MOVDconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR x (MVN y))
+ // result: (EON x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (MVN x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORshiftRL [c] (SLLconst x [64-c]) x)
+	// result: (RORconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev16 x)
+ // result: (SRLconst [48] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev8 x)
+ // result: (SRLconst [56] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(56)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) == nil
+ // result: (CSEL [OpARM64NotEqual] x y (CMPWconst [0] boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) == nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(boolval)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (FMOVSconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARM64FMOVSconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (FMOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARM64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVDconst [b2i(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(b))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // result: (CLZW (RBITW <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz64 <t> x)
+ // result: (CLZ (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZ)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADDD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAconst (MULL <typ.Int64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRAconst (UMULL <typ.UInt64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x zero:(MOVDconst [0]))
+ // result: (Eq16 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq16U (MOVDconst [1]) x)
+ // result: (Neq16 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (LessEqualF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x zero:(MOVDconst [0]))
+ // result: (Eq32 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq32U (MOVDconst [1]) x)
+ // result: (Neq32 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (LessEqualF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x zero:(MOVDconst [0]))
+ // result: (Eq64 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq64U (MOVDconst [1]) x)
+ // result: (Neq64 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq64U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x zero:(MOVDconst [0]))
+ // result: (Eq8 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq8U (MOVDconst [1]) x)
+ // result: (Neq8 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U zero:(MOVDconst [0]) x)
+ // result: (Neq16 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less16U x (MOVDconst [1]))
+ // result: (Eq16 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less16U x y)
+ // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (LessThanF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U zero:(MOVDconst [0]) x)
+ // result: (Neq32 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less32U x (MOVDconst [1]))
+ // result: (Eq32 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less32U x y)
+ // result: (LessThanU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (LessThanF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U zero:(MOVDconst [0]) x)
+ // result: (Neq64 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less64U x (MOVDconst [1]))
+ // result: (Eq64 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less64U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U zero:(MOVDconst [0]) x)
+ // result: (Neq8 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less8U x (MOVDconst [1]))
+ // result: (Eq8 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less8U x y)
+ // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y)
+ // result: (MODW x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MOD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MOD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] dst src mem)
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%8 != 0 && s > 8
+ // result: (Move [s%8] (OffPtr <dst.Type> dst [s-s%8]) (OffPtr <src.Type> src [s-s%8]) (Move [s-s%8] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 != 0 && s > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s % 8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s - s%8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s - s%8)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(s - s%8)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem) (DUFFCOPY <types.TypeMem> [8*(64-(s-8)/16)] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(8 * (64 - (s-8)/16))
+ v1.AddArg3(dst, src, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (64 - s/16))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 24 && s%8 == 0 && logLargeCopy(v, s)
+ // result: (LoweredMove dst src (ADDconst <src.Type> src [s-8]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64LoweredMove)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Not x)
+ // result: (XOR (MOVDconst [1]) x)
+ for {
+ x := v_0
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueARM64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (RORW x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft64 x y)
+ // result: (ROR x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y bo))
+ // result: (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(bo)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64ADCzerocarry)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(-1)
+ v3.AddArg(c)
+ v2.AddArg(v3)
+ v1.AddArg3(x, y, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y bo))
+ // result: (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64NEG)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4.AddArg(bo)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] ptr mem)
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] ptr mem)
+ // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] ptr mem)
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] ptr mem)
+ // result: (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [9] ptr mem)
+ // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [10] ptr mem)
+ // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [11] ptr mem)
+ // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(10)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] ptr mem)
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [13] ptr mem)
+ // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 13 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [14] ptr mem)
+ // result: (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 14 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [15] ptr mem)
+ // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 15 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(14)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(12)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] ptr mem)
+ // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, v0, v0, mem)
+ return true
+ }
+ // match: (Zero [32] ptr mem)
+ // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg4(ptr, v0, v0, mem)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [48] ptr mem)
+ // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg4(ptr, v0, v0, mem)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [64] ptr mem)
+ // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(16)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg4(ptr, v0, v0, mem)
+ v2.AddArg4(ptr, v0, v0, v3)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 <= 8 && s > 16
+ // result: (Zero [8] (OffPtr <ptr.Type> ptr [s-8]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 <= 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 > 8 && s > 16
+ // result: (Zero [16] (OffPtr <ptr.Type> ptr [s-16]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 > 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (64 - s/16)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARM64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (64 - s/16))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ // result: (LoweredZero ptr (ADDconst <ptr.Type> [s-16] ptr) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) {
+ break
+ }
+ v.reset(OpARM64LoweredZero)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockARM64(b *Block) bool {
+ switch b.Kind {
+ case BlockARM64EQ:
+ // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] x) yes no)
+ // result: (Z x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64Z, x)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x) yes no)
+ // result: (ZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64ZW, x)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (EQ (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cmp)
+ return true
+ }
+ case BlockARM64FGE:
+ // match: (FGE (InvertFlags cmp) yes no)
+ // result: (FLE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cmp)
+ return true
+ }
+ case BlockARM64FGT:
+ // match: (FGT (InvertFlags cmp) yes no)
+ // result: (FLT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cmp)
+ return true
+ }
+ case BlockARM64FLE:
+ // match: (FLE (InvertFlags cmp) yes no)
+ // result: (FGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cmp)
+ return true
+ }
+ case BlockARM64FLT:
+ // match: (FLT (InvertFlags cmp) yes no)
+ // result: (FGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cmp)
+ return true
+ }
+ case BlockARM64GE:
+ // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] x) yes no)
+ // result: (TBZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (GE (CMPconst [0] x) yes no)
+ // result: (TBZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cmp)
+ return true
+ }
+ case BlockARM64GEnoov:
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LEnoov, cmp)
+ return true
+ }
+ case BlockARM64GT:
+ // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cmp)
+ return true
+ }
+ case BlockARM64GTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LTnoov, cmp)
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (If (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (If (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (If (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NZ cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockARM64NZ, cond)
+ return true
+ }
+ case BlockARM64LE:
+ // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cmp)
+ return true
+ }
+ case BlockARM64LEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GEnoov, cmp)
+ return true
+ }
+ case BlockARM64LT:
+ // match: (LT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x) yes no)
+ // result: (TBNZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (LT (CMPconst [0] x) yes no)
+ // result: (TBNZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cmp)
+ return true
+ }
+ case BlockARM64LTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GTnoov, cmp)
+ return true
+ }
+ case BlockARM64NE:
+ // match: (NE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x) yes no)
+ // result: (NZ x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZ, x)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x) yes no)
+ // result: (NZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZW, x)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NE (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cmp)
+ return true
+ }
+ case BlockARM64NZ:
+ // match: (NZ (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (NZ (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (NZ (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (NZ (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (NZ (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (NZ (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (NZ (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (NZ (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (NZ (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NZ (MOVDconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZ (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64NZW:
+ // match: (NZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64UGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cmp)
+ return true
+ }
+ case BlockARM64UGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cmp)
+ return true
+ }
+ case BlockARM64ULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cmp)
+ return true
+ }
+ case BlockARM64ULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cmp)
+ return true
+ }
+ case BlockARM64Z:
+ // match: (Z (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (Z (MOVDconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (Z (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockARM64ZW:
+ // match: (ZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteCond_test.go b/src/cmd/compile/internal/ssa/rewriteCond_test.go
new file mode 100644
index 0000000..2c26fdf
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteCond_test.go
@@ -0,0 +1,597 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math"
+ "math/rand"
+ "testing"
+)
+
+var (
+ x64 int64 = math.MaxInt64 - 2
+ x64b int64 = math.MaxInt64 - 2
+ x64c int64 = math.MaxInt64 - 2
+ y64 int64 = math.MinInt64 + 1
+ x32 int32 = math.MaxInt32 - 2
+ x32b int32 = math.MaxInt32 - 2
+ x32c int32 = math.MaxInt32 - 2
+ y32 int32 = math.MinInt32 + 1
+ one64 int64 = 1
+ one32 int32 = 1
+ v64 int64 = 11 // ensure it's not 2**n +/- 1
+ v64_n int64 = -11
+ v32 int32 = 11
+ v32_n int32 = -11
+ uv32 uint32 = 19
+ uz uint8 = 1 // for lowering to SLL/SRL/SRA
+)
+
+var crTests = []struct {
+ name string
+ tf func(t *testing.T)
+}{
+ {"AddConst64", testAddConst64},
+ {"AddConst32", testAddConst32},
+ {"AddVar64", testAddVar64},
+ {"AddVar32", testAddVar32},
+ {"MAddVar64", testMAddVar64},
+ {"MAddVar32", testMAddVar32},
+ {"MSubVar64", testMSubVar64},
+ {"MSubVar32", testMSubVar32},
+ {"AddShift32", testAddShift32},
+ {"SubShift32", testSubShift32},
+}
+
+var crBenches = []struct {
+ name string
+ bf func(b *testing.B)
+}{
+ {"SoloJump", benchSoloJump},
+ {"CombJump", benchCombJump},
+}
+
+// Test the int32/int64 add/sub/madd/msub operations with boundary values to
+// ensure that the optimization of if-statement conditions into 'compare to
+// zero' expressions yields the expected results.
+// 32 rewriting rules are covered. At least two scenarios for "Canonicalize
+// the order of arguments to comparisons", which helps CSE, are also covered.
+// The tedious if-else structures are necessary to ensure that all concerned
+// rules and machine code sequences are covered.
+// This is initially for arm64; see https://github.com/golang/go/issues/38740
+func TestCondRewrite(t *testing.T) {
+ for _, test := range crTests {
+ t.Run(test.name, test.tf)
+ }
+}
+
+// Profile the aforementioned optimization from two angles:
+// SoloJump: the generated branching code has one 'jump', for '<' and '>='
+// CombJump: the generated branching code has two consecutive 'jumps', for '<=' and '>'
+// We expect 'CombJump' to be generally on par with the non-optimized code, and
+// 'SoloJump' to demonstrate some improvement.
+// This is initially for arm64; see https://github.com/golang/go/issues/38740
+func BenchmarkCondRewrite(b *testing.B) {
+ for _, bench := range crBenches {
+ b.Run(bench.name, bench.bf)
+ }
+}
+
+// var +/- const
+func testAddConst64(t *testing.T) {
+ if x64+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x64)
+ }
+
+ if x64+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x64)
+ }
+
+ if y64-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y64)
+ }
+
+ if y64-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y64)
+ }
+
+ if x64+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x64)
+ }
+
+ if x64+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x64)
+ }
+
+ if y64-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y64)
+ }
+
+ if y64-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y64)
+ }
+}
+
+// 32-bit var +/- const
+func testAddConst32(t *testing.T) {
+ if x32+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x32)
+ }
+
+ if x32+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x32)
+ }
+
+ if y32-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y32)
+ }
+
+ if y32-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y32)
+ }
+
+ if x32+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x32)
+ }
+
+ if x32+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x32)
+ }
+
+ if y32-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y32)
+ }
+
+ if y32-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y32)
+ }
+}
+
+// var + var
+func testAddVar64(t *testing.T) {
+ if x64+v64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x64, v64)
+ }
+
+ if x64+v64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x64, v64)
+ }
+
+ if x64+v64 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit var + var
+func testAddVar32(t *testing.T) {
+ if x32+v32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x32, v32)
+ }
+
+ if x32+v32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x32, v32)
+ }
+
+ if x32+v32 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-add
+func testMAddVar64(t *testing.T) {
+ if x64+v64*one64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64*one64 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit multiply-add
+func testMAddVar32(t *testing.T) {
+ if x32+v32*one32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32*one32 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-sub
+func testMSubVar64(t *testing.T) {
+ if x64-v64_n*one64 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y64, v64)
+ }
+
+ if x64-v64_n*one64 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y64, v64)
+ }
+
+ if x64-x64b*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, x64b)
+ }
+
+ if x64-x64b*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, x64b)
+ }
+}
+
+// 32-bit multiply-sub
+func testMSubVar32(t *testing.T) {
+ if x32-v32_n*one32 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y32, v32)
+ }
+
+ if x32-v32_n*one32 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y32, v32)
+ }
+
+ if x32-x32b*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, x32b)
+ }
+
+ if x32-x32b*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, x32b)
+ }
+}
+
+// 32-bit ADDshift, covering one or two scenarios for each condition
+func testAddShift32(t *testing.T) {
+ if x32+v32<<1 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x<<%#x < 0' failed", x32, v32, 1)
+ }
+
+ if x32+v32>>1 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x>>%#x <= 0' failed", x32, v32, 1)
+ }
+
+ if x32+int32(uv32>>1) > 0 {
+ t.Errorf("'%#x + int32(%#x>>%#x) > 0' failed", x32, uv32, 1)
+ }
+
+ if x32+v32<<uz >= 0 {
+ t.Errorf("'%#x + %#x<<%#x >= 0' failed", x32, v32, uz)
+ }
+
+ if x32+v32>>uz > 0 {
+ t.Errorf("'%#x + %#x>>%#x > 0' failed", x32, v32, uz)
+ }
+
+ if x32+int32(uv32>>uz) < 0 {
+ } else {
+ t.Errorf("'%#x + int32(%#x>>%#x) < 0' failed", x32, uv32, uz)
+ }
+}
+
+// 32-bit SUBshift, covering one or two scenarios for each condition
+func testSubShift32(t *testing.T) {
+ if y32-v32<<1 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x<<%#x > 0' failed", y32, v32, 1)
+ }
+
+ if y32-v32>>1 < 0 {
+ t.Errorf("'%#x - %#x>>%#x < 0' failed", y32, v32, 1)
+ }
+
+ if y32-int32(uv32>>1) >= 0 {
+ } else {
+ t.Errorf("'%#x - int32(%#x>>%#x) >= 0' failed", y32, uv32, 1)
+ }
+
+ if y32-v32<<uz < 0 {
+ t.Errorf("'%#x - %#x<<%#x < 0' failed", y32, v32, uz)
+ }
+
+ if y32-v32>>uz >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x>>%#x >= 0' failed", y32, v32, uz)
+ }
+
+ if y32-int32(uv32>>uz) <= 0 {
+ t.Errorf("'%#x - int32(%#x>>%#x) <= 0' failed", y32, uv32, uz)
+ }
+}
+
+var rnd = rand.New(rand.NewSource(0))
+var sink int64
+
+func benchSoloJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+	// 6 out of 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 >= 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
+
+func benchCombJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+	// 6 out of 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 > 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
new file mode 100644
index 0000000..3fc5527
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -0,0 +1,7535 @@
+// Code generated from gen/MIPS.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPSADDF
+ return true
+ case OpAdd32withcarry:
+ return rewriteValueMIPS_OpAdd32withcarry(v)
+ case OpAdd64F:
+ v.Op = OpMIPSADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddr:
+ return rewriteValueMIPS_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPSAND
+ return true
+ case OpAndB:
+ v.Op = OpMIPSAND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPSLoweredAtomicAdd
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpMIPSLoweredAtomicAnd
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueMIPS_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpMIPSLoweredAtomicCas
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPSLoweredAtomicExchange
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPSLoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicOr32:
+ v.Op = OpMIPSLoweredAtomicOr
+ return true
+ case OpAtomicOr8:
+ return rewriteValueMIPS_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPSLoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAvg32u:
+ return rewriteValueMIPS_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueMIPS_OpBitLen32(v)
+ case OpClosureCall:
+ v.Op = OpMIPSCALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS_OpCom32(v)
+ case OpCom8:
+ return rewriteValueMIPS_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpMIPSMOVFconst
+ return true
+ case OpConst64F:
+ v.Op = OpMIPSMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueMIPS_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueMIPS_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpMIPSTRUNCFW
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPSMOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPSMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPSMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPSTRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPSMOVDF
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPSDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpMIPSDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueMIPS_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueMIPS_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPSLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPSLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPSLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS_OpHmul32u(v)
+ case OpInterCall:
+ v.Op = OpMIPSCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueMIPS_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueMIPS_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueMIPS_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueMIPS_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS_OpLsh8x8(v)
+ case OpMIPSADD:
+ return rewriteValueMIPS_OpMIPSADD(v)
+ case OpMIPSADDconst:
+ return rewriteValueMIPS_OpMIPSADDconst(v)
+ case OpMIPSAND:
+ return rewriteValueMIPS_OpMIPSAND(v)
+ case OpMIPSANDconst:
+ return rewriteValueMIPS_OpMIPSANDconst(v)
+ case OpMIPSCMOVZ:
+ return rewriteValueMIPS_OpMIPSCMOVZ(v)
+ case OpMIPSCMOVZzero:
+ return rewriteValueMIPS_OpMIPSCMOVZzero(v)
+ case OpMIPSLoweredAtomicAdd:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v)
+ case OpMIPSLoweredAtomicStore32:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v)
+ case OpMIPSMOVBUload:
+ return rewriteValueMIPS_OpMIPSMOVBUload(v)
+ case OpMIPSMOVBUreg:
+ return rewriteValueMIPS_OpMIPSMOVBUreg(v)
+ case OpMIPSMOVBload:
+ return rewriteValueMIPS_OpMIPSMOVBload(v)
+ case OpMIPSMOVBreg:
+ return rewriteValueMIPS_OpMIPSMOVBreg(v)
+ case OpMIPSMOVBstore:
+ return rewriteValueMIPS_OpMIPSMOVBstore(v)
+ case OpMIPSMOVBstorezero:
+ return rewriteValueMIPS_OpMIPSMOVBstorezero(v)
+ case OpMIPSMOVDload:
+ return rewriteValueMIPS_OpMIPSMOVDload(v)
+ case OpMIPSMOVDstore:
+ return rewriteValueMIPS_OpMIPSMOVDstore(v)
+ case OpMIPSMOVFload:
+ return rewriteValueMIPS_OpMIPSMOVFload(v)
+ case OpMIPSMOVFstore:
+ return rewriteValueMIPS_OpMIPSMOVFstore(v)
+ case OpMIPSMOVHUload:
+ return rewriteValueMIPS_OpMIPSMOVHUload(v)
+ case OpMIPSMOVHUreg:
+ return rewriteValueMIPS_OpMIPSMOVHUreg(v)
+ case OpMIPSMOVHload:
+ return rewriteValueMIPS_OpMIPSMOVHload(v)
+ case OpMIPSMOVHreg:
+ return rewriteValueMIPS_OpMIPSMOVHreg(v)
+ case OpMIPSMOVHstore:
+ return rewriteValueMIPS_OpMIPSMOVHstore(v)
+ case OpMIPSMOVHstorezero:
+ return rewriteValueMIPS_OpMIPSMOVHstorezero(v)
+ case OpMIPSMOVWload:
+ return rewriteValueMIPS_OpMIPSMOVWload(v)
+ case OpMIPSMOVWreg:
+ return rewriteValueMIPS_OpMIPSMOVWreg(v)
+ case OpMIPSMOVWstore:
+ return rewriteValueMIPS_OpMIPSMOVWstore(v)
+ case OpMIPSMOVWstorezero:
+ return rewriteValueMIPS_OpMIPSMOVWstorezero(v)
+ case OpMIPSMUL:
+ return rewriteValueMIPS_OpMIPSMUL(v)
+ case OpMIPSNEG:
+ return rewriteValueMIPS_OpMIPSNEG(v)
+ case OpMIPSNOR:
+ return rewriteValueMIPS_OpMIPSNOR(v)
+ case OpMIPSNORconst:
+ return rewriteValueMIPS_OpMIPSNORconst(v)
+ case OpMIPSOR:
+ return rewriteValueMIPS_OpMIPSOR(v)
+ case OpMIPSORconst:
+ return rewriteValueMIPS_OpMIPSORconst(v)
+ case OpMIPSSGT:
+ return rewriteValueMIPS_OpMIPSSGT(v)
+ case OpMIPSSGTU:
+ return rewriteValueMIPS_OpMIPSSGTU(v)
+ case OpMIPSSGTUconst:
+ return rewriteValueMIPS_OpMIPSSGTUconst(v)
+ case OpMIPSSGTUzero:
+ return rewriteValueMIPS_OpMIPSSGTUzero(v)
+ case OpMIPSSGTconst:
+ return rewriteValueMIPS_OpMIPSSGTconst(v)
+ case OpMIPSSGTzero:
+ return rewriteValueMIPS_OpMIPSSGTzero(v)
+ case OpMIPSSLL:
+ return rewriteValueMIPS_OpMIPSSLL(v)
+ case OpMIPSSLLconst:
+ return rewriteValueMIPS_OpMIPSSLLconst(v)
+ case OpMIPSSRA:
+ return rewriteValueMIPS_OpMIPSSRA(v)
+ case OpMIPSSRAconst:
+ return rewriteValueMIPS_OpMIPSSRAconst(v)
+ case OpMIPSSRL:
+ return rewriteValueMIPS_OpMIPSSRL(v)
+ case OpMIPSSRLconst:
+ return rewriteValueMIPS_OpMIPSSRLconst(v)
+ case OpMIPSSUB:
+ return rewriteValueMIPS_OpMIPSSUB(v)
+ case OpMIPSSUBconst:
+ return rewriteValueMIPS_OpMIPSSUBconst(v)
+ case OpMIPSXOR:
+ return rewriteValueMIPS_OpMIPSXOR(v)
+ case OpMIPSXORconst:
+ return rewriteValueMIPS_OpMIPSXORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueMIPS_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS_OpMove(v)
+ case OpMul16:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32F:
+ v.Op = OpMIPSMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpMIPSMULTU
+ return true
+ case OpMul64F:
+ v.Op = OpMIPSMULD
+ return true
+ case OpMul8:
+ v.Op = OpMIPSMUL
+ return true
+ case OpNeg16:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPSNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPSNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueMIPS_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPSXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPSLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr32:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr8:
+ v.Op = OpMIPSOR
+ return true
+ case OpOrB:
+ v.Op = OpMIPSOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueMIPS_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPSMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueMIPS_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueMIPS_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPSSQRTD
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPSCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32F:
+ v.Op = OpMIPSSUBF
+ return true
+ case OpSub32withcarry:
+ return rewriteValueMIPS_OpSub32withcarry(v)
+ case OpSub64F:
+ v.Op = OpMIPSSUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPSSUB
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPSLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPSXOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPSMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueMIPS_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add32withcarry <t> x y c)
+ // result: (ADD c (ADD <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(c, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(0)
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v9.AuxInt = int32ToAuxInt(0xff)
+ v8.AddArg2(v9, v5)
+ v7.AddArg(v8)
+ v2.AddArg2(v3, v7)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(3)
+ v7.AddArg(ptr)
+ v6.AddArg(v7)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v10.AuxInt = int32ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v8.AddArg(v9)
+ v2.AddArg2(v3, v8)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v5.AddArg(ptr)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicOr8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueMIPS_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVWconst [b2i32(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(b))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSAND, t)
+ v4 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v4.AddArg(x)
+ v3.AddArg2(x, v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (Select0 (MULT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (Select0 (MULTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVWconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XORconst [1] (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XORconst [1] (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XORconst [1] (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to32 y) (SignExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to32 y) (SignExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVWconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(c+d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND (SGTUconst [1] x) (SGTUconst [1] y))
+ // result: (SGTUconst [1] (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZ _ f (MOVWconst [0]))
+ // result: f
+ for {
+ f := v_1
+ if v_2.Op != OpMIPSMOVWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(f)
+ return true
+ }
+ // match: (CMOVZ a _ (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (CMOVZ a (MOVWconst [0]) c)
+ // result: (CMOVZzero a c)
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ c := v_2
+ v.reset(OpMIPSCMOVZzero)
+ v.AddArg2(a, c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZzero _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (CMOVZzero a (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
+ // cond: is16Bit(int64(c))
+ // result: (LoweredAtomicAddconst [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(int64(c))) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAddconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVWconst [0]) mem)
+ // result: (LoweredAtomicStorezero ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSLoweredAtomicStorezero)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MUL (MOVWconst [0]) _ )
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [1]) x )
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [-1]) x )
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) x )
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVWconst [c]))
+ // result: (MOVWconst [-c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVWconst [c]))
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [^(c|d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR (SGTUzero x) (SGTUzero y))
+ // result: (SGTUzero (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUzero {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUzero {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUzero)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVWconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVWconst [c]) x)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGT x (MOVWconst [0]))
+ // result: (SGTzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVWconst [c]) x)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGTU x (MOVWconst [0]))
+ // result: (SGTUzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTUzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) > uint32(d)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) > uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) <= uint32(d)
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) <= uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint32(m) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLconst _ [d]))
+ // cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d != 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d == 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c > d
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c <= d
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLconst _ [d]))
+ // cond: 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d > 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d > 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d <= 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d <= 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // result: (SLLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // result: (SRAconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // result: (SRLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint32(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB (MOVWconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(12)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(4)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(4)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0)
+ // result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredMove)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVWconst [c]))
+ // result: (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVWconst [c]))
+ // result: (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(31)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add32carry <t> x y))
+ // result: (ADD <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSADD)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Sub32carry <t> x y))
+ // result: (SUB <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSUB)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [1]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [-1]) x ))
+ // result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(-1)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32((int64(uint32(c)) * int64(uint32(d))) >> 32))
+ return true
+ }
+ break
+ }
+ // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c%d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add32carry <t> x y))
+ // result: (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Select1 (Sub32carry <t> x y))
+ // result: (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Select1 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [1]) x ))
+ // result: x
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [-1]) x ))
+ // result: (NEG <x.Type> x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSNEG)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32(uint32(c)*uint32(d))])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) * uint32(d)))
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c/d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSub32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32withcarry <t> x y c)
+ // result: (SUB (SUB <t> x y) c)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+}
+func rewriteValueMIPS_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(4)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 16 || t.Alignment()%4 != 0)
+ // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s > 16 || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredZero)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (NEG (SGTU x (MOVWconst [0])))
+ for {
+ x := v_0
+ v.reset(OpMIPSNEG)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockMIPS(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPSEQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (EQ (SGTUzero x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGEZ, x)
+ return true
+ }
+ // match: (EQ (SGTzero x) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLEZ, x)
+ return true
+ }
+ // match: (EQ (MOVWconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGEZ:
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGTZ:
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPSNE, cond)
+ return true
+ }
+ case BlockMIPSLEZ:
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSLTZ:
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSNE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (NE (SGTUzero x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLTZ, x)
+ return true
+ }
+ // match: (NE (SGTzero x) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGTZ, x)
+ return true
+ }
+ // match: (NE (MOVWconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
new file mode 100644
index 0000000..d78f608
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -0,0 +1,8040 @@
+// Code generated from gen/MIPS64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS64(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPS64ADDF
+ return true
+ case OpAdd64:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd64F:
+ v.Op = OpMIPS64ADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddr:
+ return rewriteValueMIPS64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd64:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAndB:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPS64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpMIPS64LoweredAtomicAdd64
+ return true
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpMIPS64LoweredAtomicCas32
+ return true
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpMIPS64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPS64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpMIPS64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPS64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPS64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicStore32:
+ v.Op = OpMIPS64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPS64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueMIPS64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpMIPS64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS64_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS64_OpCom32(v)
+ case OpCom64:
+ return rewriteValueMIPS64_OpCom64(v)
+ case OpCom8:
+ return rewriteValueMIPS64_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueMIPS64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueMIPS64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueMIPS64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueMIPS64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS64_OpConstNil(v)
+ case OpCvt32Fto32:
+ v.Op = OpMIPS64TRUNCFW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpMIPS64TRUNCFV
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPS64MOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPS64MOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPS64MOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPS64TRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPS64MOVDF
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpMIPS64TRUNCDV
+ return true
+ case OpCvt64to32F:
+ v.Op = OpMIPS64MOVVF
+ return true
+ case OpCvt64to64F:
+ v.Op = OpMIPS64MOVVD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPS64DIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueMIPS64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpMIPS64DIVD
+ return true
+ case OpDiv64u:
+ return rewriteValueMIPS64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueMIPS64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueMIPS64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueMIPS64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS64_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPS64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPS64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPS64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS64_OpHmul32u(v)
+ case OpHmul64:
+ return rewriteValueMIPS64_OpHmul64(v)
+ case OpHmul64u:
+ return rewriteValueMIPS64_OpHmul64u(v)
+ case OpInterCall:
+ v.Op = OpMIPS64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueMIPS64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueMIPS64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueMIPS64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueMIPS64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueMIPS64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueMIPS64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueMIPS64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueMIPS64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueMIPS64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueMIPS64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueMIPS64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueMIPS64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS64_OpLsh8x8(v)
+ case OpMIPS64ADDV:
+ return rewriteValueMIPS64_OpMIPS64ADDV(v)
+ case OpMIPS64ADDVconst:
+ return rewriteValueMIPS64_OpMIPS64ADDVconst(v)
+ case OpMIPS64AND:
+ return rewriteValueMIPS64_OpMIPS64AND(v)
+ case OpMIPS64ANDconst:
+ return rewriteValueMIPS64_OpMIPS64ANDconst(v)
+ case OpMIPS64LoweredAtomicAdd32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v)
+ case OpMIPS64LoweredAtomicAdd64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v)
+ case OpMIPS64LoweredAtomicStore32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v)
+ case OpMIPS64LoweredAtomicStore64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v)
+ case OpMIPS64MOVBUload:
+ return rewriteValueMIPS64_OpMIPS64MOVBUload(v)
+ case OpMIPS64MOVBUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBUreg(v)
+ case OpMIPS64MOVBload:
+ return rewriteValueMIPS64_OpMIPS64MOVBload(v)
+ case OpMIPS64MOVBreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
+ case OpMIPS64MOVBstore:
+ return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
+ case OpMIPS64MOVBstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
+ case OpMIPS64MOVDload:
+ return rewriteValueMIPS64_OpMIPS64MOVDload(v)
+ case OpMIPS64MOVDstore:
+ return rewriteValueMIPS64_OpMIPS64MOVDstore(v)
+ case OpMIPS64MOVFload:
+ return rewriteValueMIPS64_OpMIPS64MOVFload(v)
+ case OpMIPS64MOVFstore:
+ return rewriteValueMIPS64_OpMIPS64MOVFstore(v)
+ case OpMIPS64MOVHUload:
+ return rewriteValueMIPS64_OpMIPS64MOVHUload(v)
+ case OpMIPS64MOVHUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHUreg(v)
+ case OpMIPS64MOVHload:
+ return rewriteValueMIPS64_OpMIPS64MOVHload(v)
+ case OpMIPS64MOVHreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
+ case OpMIPS64MOVHstore:
+ return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
+ case OpMIPS64MOVHstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
+ case OpMIPS64MOVVload:
+ return rewriteValueMIPS64_OpMIPS64MOVVload(v)
+ case OpMIPS64MOVVreg:
+ return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
+ case OpMIPS64MOVVstore:
+ return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
+ case OpMIPS64MOVVstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
+ case OpMIPS64MOVWUload:
+ return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
+ case OpMIPS64MOVWUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWUreg(v)
+ case OpMIPS64MOVWload:
+ return rewriteValueMIPS64_OpMIPS64MOVWload(v)
+ case OpMIPS64MOVWreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
+ case OpMIPS64MOVWstore:
+ return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
+ case OpMIPS64MOVWstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
+ case OpMIPS64NEGV:
+ return rewriteValueMIPS64_OpMIPS64NEGV(v)
+ case OpMIPS64NOR:
+ return rewriteValueMIPS64_OpMIPS64NOR(v)
+ case OpMIPS64NORconst:
+ return rewriteValueMIPS64_OpMIPS64NORconst(v)
+ case OpMIPS64OR:
+ return rewriteValueMIPS64_OpMIPS64OR(v)
+ case OpMIPS64ORconst:
+ return rewriteValueMIPS64_OpMIPS64ORconst(v)
+ case OpMIPS64SGT:
+ return rewriteValueMIPS64_OpMIPS64SGT(v)
+ case OpMIPS64SGTU:
+ return rewriteValueMIPS64_OpMIPS64SGTU(v)
+ case OpMIPS64SGTUconst:
+ return rewriteValueMIPS64_OpMIPS64SGTUconst(v)
+ case OpMIPS64SGTconst:
+ return rewriteValueMIPS64_OpMIPS64SGTconst(v)
+ case OpMIPS64SLLV:
+ return rewriteValueMIPS64_OpMIPS64SLLV(v)
+ case OpMIPS64SLLVconst:
+ return rewriteValueMIPS64_OpMIPS64SLLVconst(v)
+ case OpMIPS64SRAV:
+ return rewriteValueMIPS64_OpMIPS64SRAV(v)
+ case OpMIPS64SRAVconst:
+ return rewriteValueMIPS64_OpMIPS64SRAVconst(v)
+ case OpMIPS64SRLV:
+ return rewriteValueMIPS64_OpMIPS64SRLV(v)
+ case OpMIPS64SRLVconst:
+ return rewriteValueMIPS64_OpMIPS64SRLVconst(v)
+ case OpMIPS64SUBV:
+ return rewriteValueMIPS64_OpMIPS64SUBV(v)
+ case OpMIPS64SUBVconst:
+ return rewriteValueMIPS64_OpMIPS64SUBVconst(v)
+ case OpMIPS64XOR:
+ return rewriteValueMIPS64_OpMIPS64XOR(v)
+ case OpMIPS64XORconst:
+ return rewriteValueMIPS64_OpMIPS64XORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueMIPS64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueMIPS64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueMIPS64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS64_OpMove(v)
+ case OpMul16:
+ return rewriteValueMIPS64_OpMul16(v)
+ case OpMul32:
+ return rewriteValueMIPS64_OpMul32(v)
+ case OpMul32F:
+ v.Op = OpMIPS64MULF
+ return true
+ case OpMul64:
+ return rewriteValueMIPS64_OpMul64(v)
+ case OpMul64F:
+ v.Op = OpMIPS64MULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpMIPS64MULVU
+ return true
+ case OpMul8:
+ return rewriteValueMIPS64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPS64NEGF
+ return true
+ case OpNeg64:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPS64NEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueMIPS64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueMIPS64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPS64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr32:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr64:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr8:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOrB:
+ v.Op = OpMIPS64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS64_OpPanicBounds(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueMIPS64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueMIPS64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueMIPS64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueMIPS64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueMIPS64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueMIPS64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueMIPS64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueMIPS64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpMIPS64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueMIPS64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPS64SQRTD
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPS64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS64_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32F:
+ v.Op = OpMIPS64SUBF
+ return true
+ case OpSub64:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub64F:
+ v.Op = OpMIPS64SUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPS64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor64:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpMIPS64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ }
+ return false
+}
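+// The per-op rewrite functions below implement the "match"/"result" rules
+// noted in their comments: each matches a generic SSA value, rewrites v in
+// place to its MIPS64 lowering, and reports whether a rewrite was applied.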
+func rewriteValueMIPS64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64ADDV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
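+// Note: the Avg64u lowering above computes (x-y)/2 + y, which equals
+// (x+y)/2 when x >= y and avoids overflowing the 64-bit addition.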
+func rewriteValueMIPS64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpMIPS64MOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpMIPS64MOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVVconst [int64(b2i(b))])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(b)))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVVconst [0])
+ for {
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 x y)
+ // result: (Select1 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select1 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
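+// Note: the integer Eq* and EqPtr lowerings above compute x == y as
+// SGTU (MOVVconst [1]) (XOR x y), i.e. "1 >u (x ^ y)", which is true
+// exactly when x ^ y is zero.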
+func rewriteValueMIPS64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64 x y)
+ // result: (Select0 (MULV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64u x y)
+ // result: (Select0 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVVconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XOR (MOVVconst [1]) (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(idx, len)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
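+// Note: IsInBounds (idx < len) is computed as the unsigned comparison
+// SGTU len idx, and IsSliceInBounds (idx <= len) as its negation
+// XOR (MOVVconst [1]) (SGTU idx len).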
+func rewriteValueMIPS64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (XOR (MOVVconst [1]) (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to64 y) (SignExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SGT (SignExt32to64 y) (SignExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to64 y) (SignExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
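+// Note: the Less* lowerings above swap the operands of SGT/SGTU
+// ("set on greater than"), computing x < y as y > x.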
+func rewriteValueMIPS64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVVload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
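+// Note: the generic Load above is dispatched to the MIPS64 load of the
+// matching width, signedness, and register class based on the value's type.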
+func rewriteValueMIPS64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
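+// Note: in the Lsh* lowerings above, Go requires a shift by 64 or more to
+// produce 0, so the SLLV result is ANDed with NEGV (SGTU (MOVVconst [64]) y),
+// a mask that is all ones when y < 64 and zero otherwise.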
+func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDVconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDV x (NEGV y))
+ // result: (SUBV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPS64SUBV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDVconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDVconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVVconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst64 [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst64)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero32 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero32)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero64 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero64)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
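+// Note: the MOVBstore rules above drop MOVBreg/MOVBUreg/MOVHreg/MOVHUreg/
+// MOVWreg/MOVWUreg extensions of the stored value, since a byte store only
+// writes the low 8 bits. A store of constant zero is rewritten to
+// MOVBstorezero.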
+func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVreg x)
+ // cond: x.Uses == 1
+ // result: (MOVVnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVVnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVVreg (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVVstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGV (MOVVconst [c]))
+ // result: (MOVVconst [-c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [^(c|d)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVVconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond: is32Bit(c|d)
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c | d)) {
+ break
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)>uint64(d)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) > uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)<=uint64(d)
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) <= uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint64(m) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(m) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLVconst _ [d]))
+ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c>d
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c<=d
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLVconst _ [d]))
+ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLLV x (MOVVconst [c]))
+ // result: (SLLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAV x (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (SRAVconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAV x (MOVVconst [c]))
+ // result: (SRAVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLV x (MOVVconst [c]))
+ // result: (SRLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBVconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SUBVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBV x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBV (MOVVconst [0]) x)
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(-c-d)
+ // result: (ADDVconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(-c+d)
+ // result: (ADDVconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond: is32Bit(c^d)
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c ^ d)) {
+ break
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+ // result: (Select0 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select0 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore dst (MOVVload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpMIPS64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0
+ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDVconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVVconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVVconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVVconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVVconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(y, v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select1 <typ.UInt64> (MULVU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect1)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (DIVVU _ (MOVVconst [1])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Select0 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c%d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPS64SGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 (MULVU x (MOVVconst [-1])))
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU _ (MOVVconst [0])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
+ // result: (MOVVconst [c*d])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c/d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAVconst (NEGV <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVVstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpMIPS64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockMIPS64(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPS64EQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (EQ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64GEZ, x)
+ return true
+ }
+ // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64LEZ, x)
+ return true
+ }
+ // match: (EQ (MOVVconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GEZ:
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GTZ:
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPS64NE, cond)
+ return true
+ }
+ case BlockMIPS64LEZ:
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64LTZ:
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64NE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (NE (SGTU x (MOVVconst [0])) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64LTZ, x)
+ return true
+ }
+ // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64GTZ, x)
+ return true
+ }
+ // match: (NE (MOVVconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
new file mode 100644
index 0000000..455f9b1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -0,0 +1,18258 @@
+// Code generated from gen/PPC64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
+
+func rewriteValuePPC64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpPPC64FABS
+ return true
+ case OpAdd16:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpPPC64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpPPC64FADD
+ return true
+ case OpAdd64carry:
+ v.Op = OpPPC64LoweredAdd64Carry
+ return true
+ case OpAdd8:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddr:
+ return rewriteValuePPC64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd32:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd64:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd8:
+ v.Op = OpPPC64AND
+ return true
+ case OpAndB:
+ v.Op = OpPPC64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpPPC64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpPPC64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpPPC64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ v.Op = OpPPC64LoweredAtomicAnd8
+ return true
+ case OpAtomicCompareAndSwap32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicCompareAndSwapRel32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v)
+ case OpAtomicExchange32:
+ v.Op = OpPPC64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpPPC64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ return rewriteValuePPC64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValuePPC64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValuePPC64_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValuePPC64_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadAcq64:
+ return rewriteValuePPC64_OpAtomicLoadAcq64(v)
+ case OpAtomicLoadPtr:
+ return rewriteValuePPC64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpPPC64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ v.Op = OpPPC64LoweredAtomicOr8
+ return true
+ case OpAtomicStore32:
+ return rewriteValuePPC64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValuePPC64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValuePPC64_OpAtomicStore8(v)
+ case OpAtomicStoreRel32:
+ return rewriteValuePPC64_OpAtomicStoreRel32(v)
+ case OpAtomicStoreRel64:
+ return rewriteValuePPC64_OpAtomicStoreRel64(v)
+ case OpAvg64u:
+ return rewriteValuePPC64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValuePPC64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValuePPC64_OpBitLen64(v)
+ case OpCeil:
+ v.Op = OpPPC64FCEIL
+ return true
+ case OpClosureCall:
+ v.Op = OpPPC64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValuePPC64_OpCom16(v)
+ case OpCom32:
+ return rewriteValuePPC64_OpCom32(v)
+ case OpCom64:
+ return rewriteValuePPC64_OpCom64(v)
+ case OpCom8:
+ return rewriteValuePPC64_OpCom8(v)
+ case OpCondSelect:
+ return rewriteValuePPC64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValuePPC64_OpConst16(v)
+ case OpConst32:
+ return rewriteValuePPC64_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpPPC64FMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValuePPC64_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpPPC64FMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValuePPC64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValuePPC64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValuePPC64_OpConstNil(v)
+ case OpCopysign:
+ return rewriteValuePPC64_OpCopysign(v)
+ case OpCtz16:
+ return rewriteValuePPC64_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuePPC64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValuePPC64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValuePPC64_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuePPC64_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuePPC64_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ v.Op = OpCopy
+ return true
+ case OpCvt32to32F:
+ return rewriteValuePPC64_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuePPC64_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuePPC64_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ v.Op = OpPPC64FRSP
+ return true
+ case OpCvt64Fto64:
+ return rewriteValuePPC64_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuePPC64_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuePPC64_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValuePPC64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuePPC64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuePPC64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpPPC64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpPPC64DIVWU
+ return true
+ case OpDiv64:
+ return rewriteValuePPC64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpPPC64FDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpPPC64DIVDU
+ return true
+ case OpDiv8:
+ return rewriteValuePPC64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuePPC64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuePPC64_OpEq16(v)
+ case OpEq32:
+ return rewriteValuePPC64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuePPC64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuePPC64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuePPC64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuePPC64_OpEq8(v)
+ case OpEqB:
+ return rewriteValuePPC64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValuePPC64_OpEqPtr(v)
+ case OpFMA:
+ v.Op = OpPPC64FMADD
+ return true
+ case OpFloor:
+ v.Op = OpPPC64FFLOOR
+ return true
+ case OpGetCallerPC:
+ v.Op = OpPPC64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpPPC64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpPPC64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpPPC64MULHW
+ return true
+ case OpHmul32u:
+ v.Op = OpPPC64MULHWU
+ return true
+ case OpHmul64:
+ v.Op = OpPPC64MULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpPPC64MULHDU
+ return true
+ case OpInterCall:
+ v.Op = OpPPC64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValuePPC64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuePPC64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuePPC64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuePPC64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuePPC64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuePPC64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuePPC64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuePPC64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuePPC64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuePPC64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuePPC64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuePPC64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuePPC64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuePPC64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuePPC64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuePPC64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuePPC64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuePPC64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuePPC64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuePPC64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuePPC64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuePPC64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuePPC64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuePPC64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValuePPC64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValuePPC64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuePPC64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuePPC64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuePPC64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuePPC64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuePPC64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuePPC64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuePPC64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuePPC64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuePPC64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuePPC64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuePPC64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuePPC64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuePPC64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuePPC64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuePPC64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuePPC64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuePPC64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuePPC64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuePPC64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuePPC64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuePPC64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuePPC64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuePPC64_OpMod8u(v)
+ case OpMove:
+ return rewriteValuePPC64_OpMove(v)
+ case OpMul16:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32F:
+ v.Op = OpPPC64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpPPC64MULLD
+ return true
+ case OpMul64F:
+ v.Op = OpPPC64FMUL
+ return true
+ case OpMul64uhilo:
+ v.Op = OpPPC64LoweredMuluhilo
+ return true
+ case OpMul8:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpNeg16:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg64:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg8:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeq16:
+ return rewriteValuePPC64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuePPC64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuePPC64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuePPC64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuePPC64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuePPC64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpPPC64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValuePPC64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpPPC64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValuePPC64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuePPC64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr32:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr64:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr8:
+ v.Op = OpPPC64OR
+ return true
+ case OpOrB:
+ v.Op = OpPPC64OR
+ return true
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v)
+ case OpPPC64ADDconst:
+ return rewriteValuePPC64_OpPPC64ADDconst(v)
+ case OpPPC64AND:
+ return rewriteValuePPC64_OpPPC64AND(v)
+ case OpPPC64ANDN:
+ return rewriteValuePPC64_OpPPC64ANDN(v)
+ case OpPPC64ANDconst:
+ return rewriteValuePPC64_OpPPC64ANDconst(v)
+ case OpPPC64CLRLSLDI:
+ return rewriteValuePPC64_OpPPC64CLRLSLDI(v)
+ case OpPPC64CMP:
+ return rewriteValuePPC64_OpPPC64CMP(v)
+ case OpPPC64CMPU:
+ return rewriteValuePPC64_OpPPC64CMPU(v)
+ case OpPPC64CMPUconst:
+ return rewriteValuePPC64_OpPPC64CMPUconst(v)
+ case OpPPC64CMPW:
+ return rewriteValuePPC64_OpPPC64CMPW(v)
+ case OpPPC64CMPWU:
+ return rewriteValuePPC64_OpPPC64CMPWU(v)
+ case OpPPC64CMPWUconst:
+ return rewriteValuePPC64_OpPPC64CMPWUconst(v)
+ case OpPPC64CMPWconst:
+ return rewriteValuePPC64_OpPPC64CMPWconst(v)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64_OpPPC64CMPconst(v)
+ case OpPPC64Equal:
+ return rewriteValuePPC64_OpPPC64Equal(v)
+ case OpPPC64FABS:
+ return rewriteValuePPC64_OpPPC64FABS(v)
+ case OpPPC64FADD:
+ return rewriteValuePPC64_OpPPC64FADD(v)
+ case OpPPC64FADDS:
+ return rewriteValuePPC64_OpPPC64FADDS(v)
+ case OpPPC64FCEIL:
+ return rewriteValuePPC64_OpPPC64FCEIL(v)
+ case OpPPC64FFLOOR:
+ return rewriteValuePPC64_OpPPC64FFLOOR(v)
+ case OpPPC64FGreaterEqual:
+ return rewriteValuePPC64_OpPPC64FGreaterEqual(v)
+ case OpPPC64FGreaterThan:
+ return rewriteValuePPC64_OpPPC64FGreaterThan(v)
+ case OpPPC64FLessEqual:
+ return rewriteValuePPC64_OpPPC64FLessEqual(v)
+ case OpPPC64FLessThan:
+ return rewriteValuePPC64_OpPPC64FLessThan(v)
+ case OpPPC64FMOVDload:
+ return rewriteValuePPC64_OpPPC64FMOVDload(v)
+ case OpPPC64FMOVDstore:
+ return rewriteValuePPC64_OpPPC64FMOVDstore(v)
+ case OpPPC64FMOVSload:
+ return rewriteValuePPC64_OpPPC64FMOVSload(v)
+ case OpPPC64FMOVSstore:
+ return rewriteValuePPC64_OpPPC64FMOVSstore(v)
+ case OpPPC64FNEG:
+ return rewriteValuePPC64_OpPPC64FNEG(v)
+ case OpPPC64FSQRT:
+ return rewriteValuePPC64_OpPPC64FSQRT(v)
+ case OpPPC64FSUB:
+ return rewriteValuePPC64_OpPPC64FSUB(v)
+ case OpPPC64FSUBS:
+ return rewriteValuePPC64_OpPPC64FSUBS(v)
+ case OpPPC64FTRUNC:
+ return rewriteValuePPC64_OpPPC64FTRUNC(v)
+ case OpPPC64GreaterEqual:
+ return rewriteValuePPC64_OpPPC64GreaterEqual(v)
+ case OpPPC64GreaterThan:
+ return rewriteValuePPC64_OpPPC64GreaterThan(v)
+ case OpPPC64ISEL:
+ return rewriteValuePPC64_OpPPC64ISEL(v)
+ case OpPPC64ISELB:
+ return rewriteValuePPC64_OpPPC64ISELB(v)
+ case OpPPC64LessEqual:
+ return rewriteValuePPC64_OpPPC64LessEqual(v)
+ case OpPPC64LessThan:
+ return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64MFVSRD:
+ return rewriteValuePPC64_OpPPC64MFVSRD(v)
+ case OpPPC64MOVBZload:
+ return rewriteValuePPC64_OpPPC64MOVBZload(v)
+ case OpPPC64MOVBZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVBZloadidx(v)
+ case OpPPC64MOVBZreg:
+ return rewriteValuePPC64_OpPPC64MOVBZreg(v)
+ case OpPPC64MOVBreg:
+ return rewriteValuePPC64_OpPPC64MOVBreg(v)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v)
+ case OpPPC64MOVBstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVBstoreidx(v)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
+ case OpPPC64MOVDload:
+ return rewriteValuePPC64_OpPPC64MOVDload(v)
+ case OpPPC64MOVDloadidx:
+ return rewriteValuePPC64_OpPPC64MOVDloadidx(v)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v)
+ case OpPPC64MOVDstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVDstoreidx(v)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v)
+ case OpPPC64MOVHBRstore:
+ return rewriteValuePPC64_OpPPC64MOVHBRstore(v)
+ case OpPPC64MOVHZload:
+ return rewriteValuePPC64_OpPPC64MOVHZload(v)
+ case OpPPC64MOVHZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHZloadidx(v)
+ case OpPPC64MOVHZreg:
+ return rewriteValuePPC64_OpPPC64MOVHZreg(v)
+ case OpPPC64MOVHload:
+ return rewriteValuePPC64_OpPPC64MOVHload(v)
+ case OpPPC64MOVHloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHloadidx(v)
+ case OpPPC64MOVHreg:
+ return rewriteValuePPC64_OpPPC64MOVHreg(v)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v)
+ case OpPPC64MOVHstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVHstoreidx(v)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v)
+ case OpPPC64MOVWBRstore:
+ return rewriteValuePPC64_OpPPC64MOVWBRstore(v)
+ case OpPPC64MOVWZload:
+ return rewriteValuePPC64_OpPPC64MOVWZload(v)
+ case OpPPC64MOVWZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWZloadidx(v)
+ case OpPPC64MOVWZreg:
+ return rewriteValuePPC64_OpPPC64MOVWZreg(v)
+ case OpPPC64MOVWload:
+ return rewriteValuePPC64_OpPPC64MOVWload(v)
+ case OpPPC64MOVWloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWloadidx(v)
+ case OpPPC64MOVWreg:
+ return rewriteValuePPC64_OpPPC64MOVWreg(v)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v)
+ case OpPPC64MOVWstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVWstoreidx(v)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
+ case OpPPC64MTVSRD:
+ return rewriteValuePPC64_OpPPC64MTVSRD(v)
+ case OpPPC64MULLD:
+ return rewriteValuePPC64_OpPPC64MULLD(v)
+ case OpPPC64MULLW:
+ return rewriteValuePPC64_OpPPC64MULLW(v)
+ case OpPPC64NEG:
+ return rewriteValuePPC64_OpPPC64NEG(v)
+ case OpPPC64NOR:
+ return rewriteValuePPC64_OpPPC64NOR(v)
+ case OpPPC64NotEqual:
+ return rewriteValuePPC64_OpPPC64NotEqual(v)
+ case OpPPC64OR:
+ return rewriteValuePPC64_OpPPC64OR(v)
+ case OpPPC64ORN:
+ return rewriteValuePPC64_OpPPC64ORN(v)
+ case OpPPC64ORconst:
+ return rewriteValuePPC64_OpPPC64ORconst(v)
+ case OpPPC64ROTL:
+ return rewriteValuePPC64_OpPPC64ROTL(v)
+ case OpPPC64ROTLW:
+ return rewriteValuePPC64_OpPPC64ROTLW(v)
+ case OpPPC64ROTLWconst:
+ return rewriteValuePPC64_OpPPC64ROTLWconst(v)
+ case OpPPC64SLD:
+ return rewriteValuePPC64_OpPPC64SLD(v)
+ case OpPPC64SLDconst:
+ return rewriteValuePPC64_OpPPC64SLDconst(v)
+ case OpPPC64SLW:
+ return rewriteValuePPC64_OpPPC64SLW(v)
+ case OpPPC64SLWconst:
+ return rewriteValuePPC64_OpPPC64SLWconst(v)
+ case OpPPC64SRAD:
+ return rewriteValuePPC64_OpPPC64SRAD(v)
+ case OpPPC64SRAW:
+ return rewriteValuePPC64_OpPPC64SRAW(v)
+ case OpPPC64SRD:
+ return rewriteValuePPC64_OpPPC64SRD(v)
+ case OpPPC64SRW:
+ return rewriteValuePPC64_OpPPC64SRW(v)
+ case OpPPC64SRWconst:
+ return rewriteValuePPC64_OpPPC64SRWconst(v)
+ case OpPPC64SUB:
+ return rewriteValuePPC64_OpPPC64SUB(v)
+ case OpPPC64SUBFCconst:
+ return rewriteValuePPC64_OpPPC64SUBFCconst(v)
+ case OpPPC64XOR:
+ return rewriteValuePPC64_OpPPC64XOR(v)
+ case OpPPC64XORconst:
+ return rewriteValuePPC64_OpPPC64XORconst(v)
+ case OpPanicBounds:
+ return rewriteValuePPC64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValuePPC64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValuePPC64_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpPPC64POPCNTD
+ return true
+ case OpPopCount8:
+ return rewriteValuePPC64_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValuePPC64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuePPC64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuePPC64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuePPC64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpPPC64FROUND
+ return true
+ case OpRound32F:
+ v.Op = OpPPC64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpPPC64LoweredRound64F
+ return true
+ case OpRsh16Ux16:
+ return rewriteValuePPC64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuePPC64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuePPC64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuePPC64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuePPC64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuePPC64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuePPC64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuePPC64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuePPC64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuePPC64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuePPC64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuePPC64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuePPC64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuePPC64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuePPC64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuePPC64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuePPC64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuePPC64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuePPC64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuePPC64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuePPC64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuePPC64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuePPC64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuePPC64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuePPC64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuePPC64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuePPC64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuePPC64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuePPC64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuePPC64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuePPC64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuePPC64_OpRsh8x8(v)
+ case OpSignExt16to32:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpPPC64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValuePPC64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpPPC64FSQRT
+ return true
+ case OpStaticCall:
+ v.Op = OpPPC64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValuePPC64_OpStore(v)
+ case OpSub16:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpPPC64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpPPC64FSUB
+ return true
+ case OpSub8:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpPPC64SUB
+ return true
+ case OpTrunc:
+ v.Op = OpPPC64FTRUNC
+ return true
+ case OpTrunc16to8:
+ return rewriteValuePPC64_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuePPC64_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuePPC64_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuePPC64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuePPC64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuePPC64_OpTrunc64to8(v)
+ case OpWB:
+ v.Op = OpPPC64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor32:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor64:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor8:
+ v.Op = OpPPC64XOR
+ return true
+ case OpZero:
+ return rewriteValuePPC64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpPPC64MOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwapRel32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [0] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (LoweredAtomicLoad32 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (LoweredAtomicLoad64 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (LoweredAtomicLoad8 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (LoweredAtomicLoad32 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq64 ptr mem)
+ // result: (LoweredAtomicLoad64 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (LoweredAtomicLoadPtr [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoadPtr)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore32 ptr val mem)
+ // result: (LoweredAtomicStore32 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore64 ptr val mem)
+ // result: (LoweredAtomicStore64 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore8 ptr val mem)
+ // result: (LoweredAtomicStore8 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (LoweredAtomicStore32 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel64 ptr val mem)
+ // result: (LoweredAtomicStore64 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUBFCconst [32] (CNTLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUBFCconst [64] (CNTLZD <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(64)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CondSelect x y bool)
+ // cond: flagArg(bool) != nil
+ // result: (ISEL [2] x y bool)
+ for {
+ x := v_0
+ y := v_1
+ bool := v_2
+ if !(flagArg(bool) != nil) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (CondSelect x y bool)
+ // cond: flagArg(bool) == nil
+ // result: (ISEL [2] x y (CMPWconst [0] bool))
+ for {
+ x := v_0
+ y := v_1
+ bool := v_2
+ if !(flagArg(bool) == nil) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(bool)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVDconst [b2i(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(b))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCopysign(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Copysign x y)
+ // result: (FCPSGN y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FCPSGN)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // cond: objabi.GOPPC64<=8
+ // result: (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+ for {
+ x := v_0
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 x)
+ // result: (CNTTZW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // cond: objabi.GOPPC64<=8
+ // result: (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+ for {
+ x := v_0
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz64 x)
+ // result: (CNTTZD x)
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (FCFIDS (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (FCFID (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to32F x)
+ // result: (FCFIDS (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to64F x)
+ // result: (FCFID (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIVD x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (ANDconst [1] (EQV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThan (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqual (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqual (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (LessEqual (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThan (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (LessThan (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && isSigned(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !isSigned(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && isSigned(t)
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !isSigned(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: (MOVBreg (MOVBZload ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 x (AND y (MOVDconst [31])))
+ // result: (SLW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Lsh32x64 x (ANDconst <typ.Int32> [31] y))
+ // result: (SLW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SLDconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SLDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 x (AND y (MOVDconst [63])))
+ // result: (SLD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Lsh64x64 x (ANDconst <typ.Int64> [63] y))
+ // result: (SLD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SLWconst x [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // cond: objabi.GOPPC64 >= 9
+ // result: (MODSW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32 x y)
+ // cond: objabi.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVW x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // cond: objabi.GOPPC64 >= 9
+ // result: (MODUW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32u x y)
+ // cond: objabi.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVWU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+	// cond: objabi.GOPPC64 >= 9
+ // result: (MODSD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64 x y)
+	// cond: objabi.GOPPC64 <= 8
+ // result: (SUB x (MULLD y (DIVD x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // cond: objabi.GOPPC64 >= 9
+ // result: (MODUD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64u x y)
+ // cond: objabi.GOPPC64 <= 8
+ // result: (SUB x (MULLD y (DIVDU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s)
+ // result: (LoweredMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 64 && objabi.GOPPC64 >= 9
+ // result: (LoweredQuadMoveShort [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 64 && objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMoveShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s)
+ // result: (LoweredQuadMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst <typ.Int64> [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADD l:(MULLD x y) z)
+ // cond: objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)
+ // result: (MADDLD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpPPC64MULLD {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ z := v_1
+ if !(objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpPPC64MADDLD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {sym} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVDaddr [int32(c+int64(d))] {sym} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c + int64(d)))
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x:(SP))
+ // cond: is32Bit(c)
+ // result: (MOVDaddr [int32(c)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (SUBFCconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLWconst {
+ continue
+ }
+ r := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLW {
+ continue
+ }
+ r := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (NOR y y))
+ // result: (ANDN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64NOR {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ANDN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) y:(MOVWZreg _))
+ // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0xFFFFFFFF {
+ continue
+ }
+ y := v_1
+ if y.Op != OpPPC64MOVWreg {
+ continue
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if x.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c &^ d)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [m] (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVBZreg _))
+ // cond: c&0xFF == 0xFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFF] y:(MOVBreg _))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFF {
+ break
+ }
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVHZreg _))
+ // cond: c&0xFFFF == 0xFFFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFFFF] y:(MOVHreg _))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF {
+ break
+ }
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBZreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHZreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWZreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CLRLSLDI [c] (SRWconst [s] x))
+ // cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CLRLSLDI [c] i:(RLWINM [s] x))
+ // cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ i := v_0
+ if i.Op != OpPPC64RLWINM {
+ break
+ }
+ s := auxIntToInt64(i.AuxInt)
+ x := i.Args[0]
+ if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPUconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPUconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPWconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPWUconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<y
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < y) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>y
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > y) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Equal (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (Equal (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64Equal)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Equal cmp)
+ // result: (ISELB [2] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FABS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FABS (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Abs(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Abs(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADD (FMUL x y) z)
+ // result: (FMADD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMUL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS (FMULS x y) z)
+ // result: (FMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMULS {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FCEIL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FCEIL (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Ceil(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Ceil(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FFLOOR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FFLOOR (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Floor(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Floor(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FGreaterEqual cmp)
+ // result: (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(v0, cmp)
+ v.AddArg3(v0, v1, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FGreaterThan cmp)
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLessEqual cmp)
+ // result: (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg2(v0, cmp)
+ v.AddArg3(v0, v1, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLessThan cmp)
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _))
+ // result: (MTVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MTVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem)
+ // result: (MOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MTVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (FABS x))
+ // result: (FNABS x)
+ for {
+ if v_0.Op != OpPPC64FABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FNABS)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (FNABS x))
+ // result: (FABS x)
+ for {
+ if v_0.Op != OpPPC64FNABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FABS)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FSQRT (FMOVDconst [x]))
+ // cond: x >= 0
+ // result: (FMOVDconst [math.Sqrt(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if !(x >= 0) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUB (FMUL x y) z)
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpPPC64FMUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS (FMULS x y) z)
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpPPC64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FTRUNC(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FTRUNC (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Trunc(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Trunc(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (GreaterEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterEqual cmp)
+ // result: (ISELB [4] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (GreaterThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterThan cmp)
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c])))
+ // cond: c >= d
+ // result: (ANDconst [d] y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPU {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] {
+ break
+ }
+ v_2_1 := v_2.Args[1]
+ if v_2_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2_1.AuxInt)
+ if !(c >= d) {
+ break
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(d)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y)))
+ // cond: c >= d
+ // result: (ANDconst [d] y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPUconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] || !(c >= d) {
+ break
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(d)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ISEL [2] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] y _ (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ y := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 0
+ // result: (ISEL [n+1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n + 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 1
+ // result: (ISEL [n-1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 1) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n - 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 2
+ // result: (ISEL [n] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 2) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ISELB [0] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [0] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [0] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 0
+ // result: (ISELB [n+1] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n + 1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 1
+ // result: (ISELB [n-1] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 1) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n - 1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 2
+ // result: (ISELB [n] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 2) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LessEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessEqual cmp)
+ // result: (ISELB [5] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(5)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LessThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessThan (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessThan cmp)
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MFVSRD (FMOVDconst [c]))
+ // result: (MOVDconst [int64(math.Float64bits(c))])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(c)))
+ return true
+ }
+ // match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64FMOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRDconst [c] x))
+ // cond: c>=56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: c>=24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (MOVBreg x))
+ // result: (MOVBZreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVBZreg z:(AND y (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0x7F
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0x7F) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c>56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c==56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRADconst [c] x))
+ // cond: c>=56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c>24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c==24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: c>=24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (MOVBZreg x))
+ // result: (MOVBreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRWconst w [24]) x0:(MOVBstore [i0] {s} p (SRWconst w [16]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRDconst w [24]) x0:(MOVBstore [i0] {s} p (SRDconst w [16]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRWconst w [8]) x0:(MOVBstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRDconst w [8]) x0:(MOVBstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i3] {s} p w x0:(MOVBstore [i2] {s} p (SRWconst w [8]) x1:(MOVBstore [i1] {s} p (SRWconst w [16]) x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
+ // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)
+ // result: (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i3 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i2 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRWconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRWconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWBRstore)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHBRstore)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem)))))
+ // cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)
+ // result: (MOVDstore [i0] {s} p w mem)
+ for {
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 48 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 32 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpPPC64MOVWstore {
+ break
+ }
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ mem := x3.Args[2]
+ if p != x3.Args[0] || w != x3.Args[1] || !(!config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] {s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
+ // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpPPC64MOVBstore {
+ break
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpPPC64SRDconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpPPC64MOVBstore {
+ break
+ }
+ i2 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpPPC64SRDconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpPPC64MOVBstore {
+ break
+ }
+ i1 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if p != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpPPC64SRDconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ break
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ break
+ }
+ mem := x6.Args[2]
+ if p != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpPPC64SRDconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpPPC64MOVDBRstore)
+ v0 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _))
+ // result: (MFVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MFVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem)
+ // result: (FMOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MFVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHBRstore {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRDconst [c] x))
+ // cond: c>=48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: c>=16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHreg x))
+ // result: (MOVHZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg z:(AND y (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVHZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0x7FFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0x7FFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c>48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c==48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRADconst [c] x))
+ // cond: c>=48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c>16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: c>=16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c==16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHZreg x))
+ // result: (MOVHreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
+ // result: (MOVWstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVHstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i1] {s} p (SRDconst w [16]) x0:(MOVHstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
+ // result: (MOVWstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVHstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWBRstore {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVWZreg x)))
+ // result: (SRWconst [c] (MOVWZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (SRDconst [c] x))
+ // cond: c>=32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(AND y (MOVWZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVWZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0
+ // result: (MOVWload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) && (int64(off1)+off2)%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0x7FFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0x7FFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVWreg x)))
+ // result: (SRAWconst [c] (MOVWreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c>32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRADconst [c] x))
+ // cond: c>=32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c==32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWZreg x))
+ // result: (MOVWreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MTVSRD (MOVDconst [c]))
+ // cond: !math.IsNaN(math.Float64frombits(uint64(c)))
+ // result: (FMOVDconst [math.Float64frombits(uint64(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Float64frombits(uint64(c)))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ return true
+ }
+ // match: (MTVSRD x:(MOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (FMOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (ADDconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (SUBFCconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NEG (SUBFCconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [^(c|d)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NotEqual (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (NotEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NotEqual cmp)
+ // result: (ISELB [6] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: ( OR (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i0] {s} p mem) o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i0] {s} p mem) o1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i1] {s} p mem) o1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i1] {s} p mem) o1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s0:(SLWconst x0:(MOVBZload [i1] {s} p mem) [n1]) s1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [n2]))
+ // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)
+ // result: @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLWconst {
+ continue
+ }
+ n1 := auxIntToInt64(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s1 := v_1
+ if s1.Op != OpPPC64SLWconst {
+ continue
+ }
+ n2 := auxIntToInt64(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(n1)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s0:(SLDconst x0:(MOVBZload [i1] {s} p mem) [n1]) s1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [n2]))
+ // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)
+ // result: @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLDconst {
+ continue
+ }
+ n1 := auxIntToInt64(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s1 := v_1
+ if s1.Op != OpPPC64SLDconst {
+ continue
+ }
+ n2 := auxIntToInt64(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(n1)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
+	// cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
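+	// The two rules above finish a little-endian 32-bit assembly whose low half
+	// already arrives as a MOVHZload (a real 16-bit load or the product of an
+	// earlier merge), turning the whole tree into a single MOVWZload. The
+	// SLWconst and SLDconst variants only differ in which shift op the front
+	// end happened to emit. Illustrative sketch, not generated by rulegen;
+	// load32le and b are hypothetical names:
+	//
+	//	func load32le(b []byte) uint32 {
+	//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	//	}
+	//
+	// On ppc64le this is expected to reduce to one 32-bit load once the byte
+	// merging rules have fired.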
+ // match: (OR <t> s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i3] {s} p mem) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLWconst x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := o0_1
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 16 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVHBRload || x2.Type != t {
+ continue
+ }
+ _ = x2.Args[1]
+ x2_0 := x2.Args[0]
+ if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i3] {s} p mem) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLDconst x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := o0_1
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 16 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVHBRload || x2.Type != t {
+ continue
+ }
+ _ = x2.Args[1]
+ x2_0 := x2.Args[0]
+ if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
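+	// The four MOVWBRload rules above handle the opposite byte order: the byte
+	// at the lowest offset ends up in the most significant position, so on a
+	// little-endian target the tree becomes a byte-reversed word load (lwbrx)
+	// through a MOVDaddr. Illustrative sketch, not generated by rulegen; b is a
+	// hypothetical name:
+	//
+	//	u := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
+	//
+	// which is roughly what a binary.BigEndian.Uint32-style decode expands to.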
+ // match: (OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40]) s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s2 := v_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 32 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 40 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s0 := o0_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 48 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s2 := v_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 56 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s0 := o0_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)))))
+	// cond: !config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
+ // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s6 := v_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 56 {
+ continue
+ }
+ x7 := s6.Args[0]
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s5 := o5_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 48 {
+ continue
+ }
+ x6 := s5.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s4 := o4_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 40 {
+ continue
+ }
+ x5 := s4.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s3 := o3_0
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
+ continue
+ }
+ x4 := s3.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ x0 := o3_1
+ if x0.Op != OpPPC64MOVWZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i0%4 == 0 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)) {
+ continue
+ }
+ b = mergePoint(b, x0, x4, x5, x6, x7)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
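+	// The rule above completes a 64-bit little-endian assembly: the low word
+	// already arrives as a MOVWZload, the four high bytes are OR'd in at shifts
+	// 32..56, and the whole tree collapses into one MOVDload (note the extra
+	// i0%4 == 0 alignment requirement in the cond). Illustrative sketch, not
+	// generated by rulegen; b is a hypothetical name:
+	//
+	//	u := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+	//		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	//
+	// essentially the shape binary.LittleEndian.Uint64 expands to.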
+ // match: (OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ o1 := o0_1
+ if o1.Op != OpPPC64OR || o1.Type != t {
+ continue
+ }
+ _ = o1.Args[1]
+ o1_0 := o1.Args[0]
+ o1_1 := o1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o1_0, o1_1 = _i2+1, o1_1, o1_0 {
+ s2 := o1_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 40 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ o2 := o1_1
+ if o2.Op != OpPPC64OR || o2.Type != t {
+ continue
+ }
+ _ = o2.Args[1]
+ o2_0 := o2.Args[0]
+ o2_1 := o2.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o2_0, o2_1 = _i3+1, o2_1, o2_0 {
+ s3 := o2_0
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
+ continue
+ }
+ x3 := s3.Args[0]
+ if x3.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ x4 := o2_1
+ if x4.Op != OpPPC64MOVWBRload || x4.Type != t {
+ continue
+ }
+ _ = x4.Args[1]
+ x4_0 := x4.Args[0]
+ if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr {
+ continue
+ }
+ i4 := auxIntToInt32(x4_0.AuxInt)
+ if p != x4_0.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (OR <t> x7:(MOVBZload [i7] {s} p mem) o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x7 := v_0
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s6 := o5_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
+ continue
+ }
+ x6 := s6.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s5 := o4_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
+ continue
+ }
+ x5 := s5.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s4 := o3_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
+ continue
+ }
+ x4 := s4.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ s0 := o3_1
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x3 := s0.Args[0]
+ if x3.Op != OpPPC64MOVWBRload || x3.Type != t {
+ continue
+ }
+ _ = x3.Args[1]
+ x3_0 := x3.Args[0]
+ if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ continue
+ }
+ b = mergePoint(b, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (OR <t> x7:(MOVBZload [i7] {s} p mem) o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x7 := v_0
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s6 := o5_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
+ continue
+ }
+ x6 := s6.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s5 := o4_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
+ continue
+ }
+ x5 := s5.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s4 := o3_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
+ continue
+ }
+ x4 := s4.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ s0 := o3_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x3 := s0.Args[0]
+ if x3.Op != OpPPC64MOVWBRload || x3.Type != t {
+ continue
+ }
+ _ = x3.Args[1]
+ x3_0 := x3.Args[0]
+ if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ continue
+ }
+ b = mergePoint(b, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
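+	// The last three rules are the 64-bit big-endian counterparts: the byte at
+	// the lowest offset lands in the most significant byte, so the tree becomes
+	// a byte-reversed doubleword load (MOVDBRload, i.e. ldbrx) through a
+	// MOVDaddr. Informal summary and sketch, not generated by rulegen; b is a
+	// hypothetical name:
+	//
+	//	u := binary.BigEndian.Uint64(b) // expands to the shift/OR tree matched above on ppc64le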
+ return false
+}
+func rewriteValuePPC64_OpPPC64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [-1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | ^d)
+ return true
+ }
+ return false
+}
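+// Illustrative note, not generated by rulegen: ORN computes x | ^y, so both
+// rules above are constant-folding identities:
+//
+//	x | ^(-1) == x | 0 == x	// first rule
+//	c | ^d	// second rule, folded at compile time
+//
+// e.g. ORN of the constants 0xF0 and 0x0F folds to 0xF0|^0x0F == 0xFFFFFFFFFFFFFFF0.
+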
+func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTL x (MOVDconst [c]))
+ // result: (ROTLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTLW x (MOVDconst [c]))
+ // result: (ROTLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROTLWconst [r] (AND (MOVDconst [m]) x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ROTLWconst [r] (ANDconst [m] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
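+// Illustrative note, not generated by rulegen: a 32-bit rotate of a masked
+// value can be a single rotate-and-mask instruction (rlwinm), because
+// (x & m) rotated by r equals (x rotated by r) & (m rotated by r).
+// isPPC64WordRotateMask checks that m describes the contiguous, possibly
+// wrapping, run of ones that rlwinm can encode, and encodePPC64RotateMask
+// packs rotate and mask into the RLWINM auxint. Sketch of the source-level
+// shape, assuming the mask passes that check (the constants and the math/bits
+// call are hypothetical):
+//
+//	y := bits.RotateLeft32(x&0x00FFFF00, 8) // mask + constant rotate -> one RLWINM
+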
+func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst [l] (SRWconst [r] x))
+ // cond: mergePPC64SldiSrw(l,r) != 0
+ // result: (RLWINM [mergePPC64SldiSrw(l,r)] x)
+ for {
+ l := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64SldiSrw(l, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVBZreg x))
+ // cond: c < 8 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 8 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 56, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVHZreg x))
+ // cond: c < 16 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 16 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 48, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVWZreg x))
+ // cond: c < 32 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(ANDconst [d] x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(AND (MOVDconst [d]) x))
+	// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLDconst [c] z:(MOVWreg x))
+ // cond: c < 32 && objabi.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
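+// Illustrative note, not generated by rulegen: shifting a zero-extended or
+// constant-masked value left fits the clear-left-then-shift-left form
+// (CLRLSLDI, an rldic extended mnemonic) as long as the mask, once shifted,
+// still fits in 64 bits; that is what the c < 8/16/32 bounds and the
+// getPPC64ShiftMaskLength comparison guarantee. Sketch of the kind of source
+// that benefits (scale is a hypothetical name):
+//
+//	func scale(x uint8) uint64 {
+//		return uint64(x) << 3 // MOVBZreg + SLDconst [3] -> one CLRLSLDI
+//	}
+//
+// The final rule instead targets the POWER9 extswsli form (EXTSWSLconst) for a
+// sign-extended word, guarded by objabi.GOPPC64 >= 9.
+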
+func rewriteValuePPC64_OpPPC64SLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLW x (MOVDconst [c]))
+ // result: (SLWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst [c] z:(MOVBZreg x))
+ // cond: z.Uses == 1 && c < 8
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 8) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 24, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(MOVHZreg x))
+ // cond: z.Uses == 1 && c < 16
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 16) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 16, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(ANDconst [d] x))
+	// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(AND (MOVDconst [d]) x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
+	// cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLWconst [c] z:(MOVWreg x))
+ // cond: c < 32 && objabi.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAW x (MOVDconst [c]))
+ // result: (SRAWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRW x (MOVDconst [c]))
+ // result: (SRWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
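+// Illustrative note, not generated by rulegen: the SLD/SLW/SRAD/SRAW/SRD/SRW
+// rules above fold a shift by a constant register operand into the immediate
+// form of the op. The auxint expression
+//
+//	c&63 | (c>>6&1*63)	// 64-bit ops; c&31 | (c>>5&1*31) for the 32-bit ops
+//
+// always evaluates to a value in 0..63 (respectively 0..31), so the resulting
+// *const op stays inside the immediate range the assembler accepts even when
+// the original constant was out of range.
+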
+func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SUBFCconst [c] x)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBFCconst [c] (NEG x))
+ // result: (ADDconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [0] x)
+ // result: (NEG x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
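+// Illustrative note, not generated by rulegen: these are plain integer
+// identities for subtraction against constants,
+//
+//	x - c == x + (-c)	// SUB x (MOVDconst [c])   -> ADDconst [-c] x
+//	c - x	// SUB (MOVDconst [c]) x   -> SUBFCconst [c] x
+//	c - (-x) == c + x	// SUBFCconst [c] (NEG x)  -> ADDconst [c] x
+//	c - (d - x) == (c-d) + x	// nested SUBFCconst       -> ADDconst [c-d] x
+//	0 - x == -x	// SUBFCconst [0] x        -> NEG x
+//
+// with is32Bit guards keeping each folded constant representable in the signed
+// 32-bit range these const ops accept.
+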
+func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XOR (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
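+// Illustrative note, not generated by rulegen: a rotate written with XOR
+// instead of OR is still a rotate, since the two shifted halves never overlap;
+// the rules above therefore accept both the constant-count form and the
+// variable-count form masked to the width, e.g.
+//
+//	r := x<<13 ^ x>>(64-13) // -> ROTLconst [13] x
+//
+// and the SLD/SRD trees with ANDconst [63] counts reduce to ROTL x y. The last
+// two rules are ordinary constant folding (c^d) and conversion to XORconst
+// when the constant fits in 32 unsigned bits (isU32Bit).
+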
+func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
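+// Illustrative note, not generated by rulegen: the bounds-check kind in the
+// auxint (index, slice, slice3, ...) is partitioned by boundsABI into three
+// classes, and each class gets its own lowered pseudo-op, later expanded into
+// the matching runtime panic call:
+//
+//	boundsABI(kind) == 0 -> LoweredPanicBoundsA
+//	boundsABI(kind) == 1 -> LoweredPanicBoundsB
+//	boundsABI(kind) == 2 -> LoweredPanicBoundsC
+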
+func rewriteValuePPC64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTW (MOVHZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (POPCNTW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTB (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
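+// Illustrative note, not generated by rulegen: PPC64 population-count
+// instructions work at word and byte granularity, so the narrow PopCount ops
+// just zero-extend first and reuse them; with the upper bits known to be zero
+// the count is exact:
+//
+//	bits.OnesCount16(x) // -> POPCNTW (MOVHZreg x)
+//	bits.OnesCount8(x)  // -> POPCNTB (MOVBZreg x)
+//
+// (math/bits calls shown for orientation; the intrinsics lower to the PopCount
+// ops matched above.)
+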
+func rewriteValuePPC64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (MOVDconst [c]))
+ // result: (ROTLWconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RotateLeft32 x y)
+ // result: (ROTLW x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft64 x (MOVDconst [c]))
+ // result: (ROTLconst [c&63] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RotateLeft64 x y)
+ // result: (ROTL x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SRWconst (ZeroExt16to32 x) [c&15])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 15)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRWconst (ZeroExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SRAWconst (SignExt16to32 x) [c&15])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 15)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAWconst (SignExt16to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAWconst (SignExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SRWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 x (AND y (MOVDconst [31])))
+ // result: (SRW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x (ANDconst <typ.UInt> [31] y))
+ // result: (SRW x (ANDconst <typ.UInt> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ // result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ // result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SRAWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAWconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 x (AND y (MOVDconst [31])))
+ // result: (SRAW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x (ANDconst <typ.UInt> [31] y))
+ // result: (SRAW x (ANDconst <typ.UInt> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ // result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ // result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SRDconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 x (AND y (MOVDconst [63])))
+ // result: (SRD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x (ANDconst <typ.UInt> [63] y))
+ // result: (SRD x (ANDconst <typ.UInt> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ // result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ // result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SRADconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (SRADconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRADconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x (AND y (MOVDconst [63])))
+ // result: (SRAD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x (ANDconst <typ.UInt> [63] y))
+ // result: (SRAD x (ANDconst <typ.UInt> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ // result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ // result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SRWconst (ZeroExt8to32 x) [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRWconst (ZeroExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SRAWconst (SignExt8to32 x) [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAWconst (SignExt8to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAWconst (SignExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpPPC64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
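+// rewriteValuePPC64_OpStore lowers a generic Store to the PPC64 store
+// instruction selected by the stored type's size and kind (float, integer, or pointer).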
+func rewriteValuePPC64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is32BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitInt(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitInt(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc16to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVWreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (MOVWZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
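+// rewriteValuePPC64_OpZero expands small constant-size Zero ops into
+// MOV*storezero sequences and lowers larger ones to LoweredZero/LoweredZeroShort
+// (GOPPC64 <= 8) or LoweredQuadZero/LoweredQuadZeroShort (GOPPC64 >= 9).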
+func rewriteValuePPC64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstorezero [2] destptr (MOVHstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(4)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [8] {t} destptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVDstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ destptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] {t} destptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ destptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] {t} destptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ destptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] {t} destptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ destptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] {t} destptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ destptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(16)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: objabi.GOPPC64 <= 8 && s < 64
+ // result: (LoweredZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(objabi.GOPPC64 <= 8 && s < 64) {
+ break
+ }
+ v.reset(OpPPC64LoweredZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: objabi.GOPPC64 <= 8
+ // result: (LoweredZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(objabi.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64LoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s < 128 && objabi.GOPPC64 >= 9
+ // result: (LoweredQuadZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s < 128 && objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: objabi.GOPPC64 >= 9
+ // result: (LoweredQuadZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(objabi.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
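+// rewriteBlockPPC64 applies PPC64 block rewrite rules: it folds flag constants,
+// removes InvertFlags by flipping the branch condition, replaces compares of
+// AND/OR/XOR results against zero with the flag-setting ANDCC/ORCC/XORCC forms,
+// and lowers generic If blocks to PPC64 conditional blocks.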
+func rewriteBlockPPC64(b *Block) bool {
+ switch b.Kind {
+ case BlockPPC64EQ:
+ // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cmp)
+ return true
+ }
+ // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GE:
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cmp)
+ return true
+ }
+ // match: (GE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (GE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (GE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GT:
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cmp)
+ return true
+ }
+ // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (GT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (GT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (If (FLessThan cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (If (FLessEqual cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (If (FGreaterThan cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (If (FGreaterEqual cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPWconst [0] cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ case BlockPPC64LE:
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cmp)
+ return true
+ }
+ // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (LE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (LE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64LT:
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cmp)
+ return true
+ }
+ // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (LT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (LT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64NE:
+ // match: (NE (CMPWconst [0] (Equal cc)) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64Equal {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (NotEqual cc)) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64NotEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (LessThan cc)) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64LessThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (LessEqual cc)) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64LessEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (GreaterThan cc)) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64GreaterThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64GreaterEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (FLessThan cc)) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64FLessThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (FLessEqual cc)) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64FLessEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64FGreaterThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64FGreaterEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cmp)
+ return true
+ }
+ // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
new file mode 100644
index 0000000..fb507b6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -0,0 +1,6604 @@
+// Code generated from gen/RISCV64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
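+// rewriteValueRISCV64 dispatches on v.Op, either substituting the equivalent
+// RISCV64 op directly or delegating to the per-op rewrite function.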
+func rewriteValueRISCV64(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpRISCV64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpRISCV64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddr:
+ return rewriteValueRISCV64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd32:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd64:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd8:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAndB:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpRISCV64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpRISCV64LoweredAtomicAdd64
+ return true
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpRISCV64LoweredAtomicCas32
+ return true
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpRISCV64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpRISCV64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpRISCV64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpRISCV64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpRISCV64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicStore32:
+ v.Op = OpRISCV64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpRISCV64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueRISCV64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpRISCV64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom32:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom64:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom8:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpConst16:
+ v.Op = OpRISCV64MOVHconst
+ return true
+ case OpConst32:
+ v.Op = OpRISCV64MOVWconst
+ return true
+ case OpConst32F:
+ return rewriteValueRISCV64_OpConst32F(v)
+ case OpConst64:
+ v.Op = OpRISCV64MOVDconst
+ return true
+ case OpConst64F:
+ return rewriteValueRISCV64_OpConst64F(v)
+ case OpConst8:
+ v.Op = OpRISCV64MOVBconst
+ return true
+ case OpConstBool:
+ return rewriteValueRISCV64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueRISCV64_OpConstNil(v)
+ case OpConvert:
+ v.Op = OpRISCV64MOVconvert
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpRISCV64FCVTWS
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpRISCV64FCVTLS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpRISCV64FCVTDS
+ return true
+ case OpCvt32to32F:
+ v.Op = OpRISCV64FCVTSW
+ return true
+ case OpCvt32to64F:
+ v.Op = OpRISCV64FCVTDW
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpRISCV64FCVTWD
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpRISCV64FCVTSD
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpRISCV64FCVTLD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpRISCV64FCVTSL
+ return true
+ case OpCvt64to64F:
+ v.Op = OpRISCV64FCVTDL
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueRISCV64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueRISCV64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueRISCV64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpRISCV64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpRISCV64DIVUW
+ return true
+ case OpDiv64:
+ return rewriteValueRISCV64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpRISCV64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpRISCV64DIVU
+ return true
+ case OpDiv8:
+ return rewriteValueRISCV64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueRISCV64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueRISCV64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueRISCV64_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpRISCV64FEQS
+ return true
+ case OpEq64:
+ return rewriteValueRISCV64_OpEq64(v)
+ case OpEq64F:
+ v.Op = OpRISCV64FEQD
+ return true
+ case OpEq8:
+ return rewriteValueRISCV64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueRISCV64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueRISCV64_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpRISCV64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpRISCV64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpRISCV64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueRISCV64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueRISCV64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpRISCV64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpRISCV64MULHU
+ return true
+ case OpInterCall:
+ v.Op = OpRISCV64CALLinter
+ return true
+ case OpIsInBounds:
+ v.Op = OpLess64U
+ return true
+ case OpIsNonNil:
+ return rewriteValueRISCV64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ v.Op = OpLeq64U
+ return true
+ case OpLeq16:
+ return rewriteValueRISCV64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueRISCV64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueRISCV64_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpRISCV64FLES
+ return true
+ case OpLeq32U:
+ return rewriteValueRISCV64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueRISCV64_OpLeq64(v)
+ case OpLeq64F:
+ v.Op = OpRISCV64FLED
+ return true
+ case OpLeq64U:
+ return rewriteValueRISCV64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueRISCV64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueRISCV64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueRISCV64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueRISCV64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueRISCV64_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpRISCV64FLTS
+ return true
+ case OpLess32U:
+ return rewriteValueRISCV64_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpRISCV64SLT
+ return true
+ case OpLess64F:
+ v.Op = OpRISCV64FLTD
+ return true
+ case OpLess64U:
+ v.Op = OpRISCV64SLTU
+ return true
+ case OpLess8:
+ return rewriteValueRISCV64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueRISCV64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueRISCV64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueRISCV64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueRISCV64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueRISCV64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueRISCV64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueRISCV64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueRISCV64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueRISCV64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueRISCV64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueRISCV64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueRISCV64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueRISCV64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueRISCV64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueRISCV64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueRISCV64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueRISCV64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueRISCV64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueRISCV64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueRISCV64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueRISCV64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueRISCV64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpRISCV64REMUW
+ return true
+ case OpMod64:
+ return rewriteValueRISCV64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpRISCV64REMU
+ return true
+ case OpMod8:
+ return rewriteValueRISCV64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueRISCV64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueRISCV64_OpMove(v)
+ case OpMul16:
+ return rewriteValueRISCV64_OpMul16(v)
+ case OpMul32:
+ v.Op = OpRISCV64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpRISCV64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpRISCV64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpRISCV64FMULD
+ return true
+ case OpMul8:
+ return rewriteValueRISCV64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpRISCV64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpRISCV64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueRISCV64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueRISCV64_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpRISCV64FNES
+ return true
+ case OpNeq64:
+ return rewriteValueRISCV64_OpNeq64(v)
+ case OpNeq64F:
+ v.Op = OpRISCV64FNED
+ return true
+ case OpNeq8:
+ return rewriteValueRISCV64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueRISCV64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpRISCV64LoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpRISCV64SEQZ
+ return true
+ case OpOffPtr:
+ return rewriteValueRISCV64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr32:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr64:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr8:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOrB:
+ v.Op = OpRISCV64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueRISCV64_OpPanicBounds(v)
+ case OpRISCV64ADD:
+ return rewriteValueRISCV64_OpRISCV64ADD(v)
+ case OpRISCV64ADDI:
+ return rewriteValueRISCV64_OpRISCV64ADDI(v)
+ case OpRISCV64AND:
+ return rewriteValueRISCV64_OpRISCV64AND(v)
+ case OpRISCV64MOVBUload:
+ return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
+ case OpRISCV64MOVBUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBUreg(v)
+ case OpRISCV64MOVBload:
+ return rewriteValueRISCV64_OpRISCV64MOVBload(v)
+ case OpRISCV64MOVBreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBreg(v)
+ case OpRISCV64MOVBstore:
+ return rewriteValueRISCV64_OpRISCV64MOVBstore(v)
+ case OpRISCV64MOVBstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v)
+ case OpRISCV64MOVDconst:
+ return rewriteValueRISCV64_OpRISCV64MOVDconst(v)
+ case OpRISCV64MOVDload:
+ return rewriteValueRISCV64_OpRISCV64MOVDload(v)
+ case OpRISCV64MOVDreg:
+ return rewriteValueRISCV64_OpRISCV64MOVDreg(v)
+ case OpRISCV64MOVDstore:
+ return rewriteValueRISCV64_OpRISCV64MOVDstore(v)
+ case OpRISCV64MOVDstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v)
+ case OpRISCV64MOVHUload:
+ return rewriteValueRISCV64_OpRISCV64MOVHUload(v)
+ case OpRISCV64MOVHUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHUreg(v)
+ case OpRISCV64MOVHload:
+ return rewriteValueRISCV64_OpRISCV64MOVHload(v)
+ case OpRISCV64MOVHreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHreg(v)
+ case OpRISCV64MOVHstore:
+ return rewriteValueRISCV64_OpRISCV64MOVHstore(v)
+ case OpRISCV64MOVHstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v)
+ case OpRISCV64MOVWUload:
+ return rewriteValueRISCV64_OpRISCV64MOVWUload(v)
+ case OpRISCV64MOVWUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWUreg(v)
+ case OpRISCV64MOVWload:
+ return rewriteValueRISCV64_OpRISCV64MOVWload(v)
+ case OpRISCV64MOVWreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWreg(v)
+ case OpRISCV64MOVWstore:
+ return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
+ case OpRISCV64MOVWstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v)
+ case OpRISCV64OR:
+ return rewriteValueRISCV64_OpRISCV64OR(v)
+ case OpRISCV64SLL:
+ return rewriteValueRISCV64_OpRISCV64SLL(v)
+ case OpRISCV64SRA:
+ return rewriteValueRISCV64_OpRISCV64SRA(v)
+ case OpRISCV64SRL:
+ return rewriteValueRISCV64_OpRISCV64SRL(v)
+ case OpRISCV64SUB:
+ return rewriteValueRISCV64_OpRISCV64SUB(v)
+ case OpRISCV64SUBW:
+ return rewriteValueRISCV64_OpRISCV64SUBW(v)
+ case OpRISCV64XOR:
+ return rewriteValueRISCV64_OpRISCV64XOR(v)
+ case OpRotateLeft16:
+ return rewriteValueRISCV64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueRISCV64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueRISCV64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueRISCV64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueRISCV64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueRISCV64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueRISCV64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueRISCV64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueRISCV64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueRISCV64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueRISCV64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueRISCV64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueRISCV64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueRISCV64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueRISCV64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueRISCV64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueRISCV64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueRISCV64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueRISCV64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueRISCV64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueRISCV64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueRISCV64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueRISCV64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueRISCV64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueRISCV64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueRISCV64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueRISCV64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueRISCV64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueRISCV64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueRISCV64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueRISCV64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueRISCV64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueRISCV64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueRISCV64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueRISCV64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueRISCV64_OpRsh8x8(v)
+ case OpSignExt16to32:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpRISCV64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueRISCV64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpRISCV64FSQRTD
+ return true
+ case OpStaticCall:
+ v.Op = OpRISCV64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueRISCV64_OpStore(v)
+ case OpSub16:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpRISCV64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpRISCV64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpRISCV64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor32:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor64:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor8:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpZero:
+ return rewriteValueRISCV64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpRISCV64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
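+ // Computes floor((x+y)/2) without 64-bit overflow: (x>>1) + (y>>1) plus (x&y&1),
+ // which restores the carry bit lost by shifting each operand down first.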
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v2.AuxInt = int64ToAuxInt(1)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t)
+ v3.AuxInt = int64ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64AND, t)
+ v4.AddArg2(x, y)
+ v3.AddArg(v4)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const32F [val])
+ // result: (FMVSX (MOVWconst [int32(math.Float32bits(val))]))
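+ // The constant is materialized as its IEEE-754 bit pattern in an integer
+ // register and then moved bit-for-bit into a float register via FMVSX.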
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpRISCV64FMVSX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64F [val])
+ // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpRISCV64FMVDX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [val])
+ // result: (MOVBconst [int8(b2i(val))])
+ for {
+ val := auxIntToBool(v.AuxInt)
+ v.reset(OpRISCV64MOVBconst)
+ v.AuxInt = int8ToAuxInt(int8(b2i(val)))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y [false])
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 x y [false])
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y [false])
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
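+ // Sub-word values may hold junk in their upper register bits, so both operands
+ // are zero-extended before the subtraction; SEQZ then yields 1 exactly when they are equal.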
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SEQZ (SUBW <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (SEQZ (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (NeqPtr (MOVDconst [0]) p)
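+ // Lowered via the generic NeqPtr against a zero constant; the NeqPtr rule
+ // later in this file turns that into SNEZ (SUB ...).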
+ for {
+ p := v_0
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, p)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (Not (Less16 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (Not (Less16U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (Not (Less32 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (Not (Less32U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (Not (Less64 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (Not (Less64U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (Not (Less8 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (Not (Less8U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SLT (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SLT (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SLT (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
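+ // Load is dispatched on the element type: boolean and integer widths pick the
+ // matching signed/unsigned MOV*load, 64-bit integers and pointers use MOVDload,
+ // and floats use FMOVWload/FMOVDload.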
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
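+ // RV64 shift instructions use only the low 6 bits of the shift amount, so a Go
+ // shift by 64 or more must be forced to 0. SLTIU [64] y is 1 when y < 64;
+ // negating it gives an all-ones (or all-zeros) mask that is ANDed with the raw SLL result.
+ // The same pattern is used for the remaining Lsh and Rsh lowerings below.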
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y [false])
+ // result: (REMW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y [false])
+ // result: (REMW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y [false])
+ // result: (REM x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REM)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (REMW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
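+ // Small fixed-size moves expand into explicit load/store pairs chosen by size and
+ // alignment; 8-byte-aligned copies of up to 8*128 bytes use Duff's device (DUFFCOPY),
+ // and everything else falls back to the generic LoweredMove loop.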
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(8)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s <= 16 || logLargeCopy(v, s))
+ // result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s <= 16 || logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (MULW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (MULW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SNEZ (SUBW <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SNEZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SNEZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
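+ // Offsets from SP become MOVaddr, other offsets that fit in 32 bits fold into an
+ // ADDI, and anything larger materializes the offset as a constant and adds it.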
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
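+ // boundsABI(kind) groups the bounds-failure kinds into three lowered variants
+ // (roughly, by which values the runtime panic function needs to be handed).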
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
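+ // ADD with a constant operand folds into the immediate form ADDI; the inner loop
+ // tries both argument orders since ADD is commutative, and a MOVDconst only folds
+ // when its value fits in 32 bits.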
+ // match: (ADD (MOVBconst [val]) x)
+ // result: (ADDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVBconst {
+ continue
+ }
+ val := auxIntToInt8(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MOVHconst [val]) x)
+ // result: (ADDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVHconst {
+ continue
+ }
+ val := auxIntToInt16(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MOVWconst [val]) x)
+ // result: (ADDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVWconst {
+ continue
+ }
+ val := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ADDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDI [c] (MOVaddr [d] {s} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVaddr [int32(c)+d] {s} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(c) + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND (MOVBconst [val]) x)
+ // result: (ANDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVBconst {
+ continue
+ }
+ val := auxIntToInt8(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVHconst [val]) x)
+ // result: (ANDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVHconst {
+ continue
+ }
+ val := auxIntToInt16(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVWconst [val]) x)
+ // result: (ANDI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVWconst {
+ continue
+ }
+ val := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ANDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVDconst <t> [c])
+ // cond: !is32Bit(c) && int32(c) < 0
+ // result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(!is32Bit(c) && int32(c) < 0) {
+ break
+ }
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c>>32 + 1)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(int32(c)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (MOVDconst <t> [c])
+ // cond: !is32Bit(c) && int32(c) >= 0
+ // result: (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(!is32Bit(c) && int32(c) >= 0) {
+ break
+ }
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c>>32 + 0)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(int32(c)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg (MOVHconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVHreg (MOVHconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWUreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg (MOVHconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg (MOVWconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg (MOVBconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVWreg (MOVHconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVDconst [int64(c)])
+ for {
+ if v_0.Op != OpRISCV64MOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR (MOVBconst [val]) x)
+ // result: (ORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVBconst {
+ continue
+ }
+ val := auxIntToInt8(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVHconst [val]) x)
+ // result: (ORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVHconst {
+ continue
+ }
+ val := auxIntToInt16(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVWconst [val]) x)
+ // result: (ORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVWconst {
+ continue
+ }
+ val := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVBconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ val := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLL x (MOVHconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ val := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLL x (MOVWconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ val := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLL x (MOVDconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVBconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ val := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRA x (MOVHconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ val := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRA x (MOVWconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ val := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRA x (MOVDconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVBconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ val := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRL x (MOVHconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ val := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRL x (MOVWconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ val := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRL x (MOVDconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVBconst [val]))
+ // result: (ADDI [-int64(val)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ val := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-int64(val))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVHconst [val]))
+ // result: (ADDI [-int64(val)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ val := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-int64(val))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [val]))
+ // cond: is32Bit(-int64(val))
+ // result: (ADDI [-int64(val)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ val := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(-int64(val))) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-int64(val))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVDconst [val]))
+ // cond: is32Bit(-val)
+ // result: (ADDI [-val] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-val)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-val)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVBconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst || auxIntToInt8(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB x (MOVHconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst || auxIntToInt16(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB x (MOVDconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB (MOVBconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVBconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVHconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVHconst || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVWconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBW x (MOVWconst [0]))
+ // result: (ADDIW [0] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRISCV64ADDIW)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [0]) x)
+ // result: (NEGW x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEGW)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR (MOVBconst [val]) x)
+ // result: (XORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVBconst {
+ continue
+ }
+ val := auxIntToInt8(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVHconst [val]) x)
+ // result: (XORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVHconst {
+ continue
+ }
+ val := auxIntToInt16(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVWconst [val]) x)
+ // result: (XORI [int64(val)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVWconst {
+ continue
+ }
+ val := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (XORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVHconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVHconst [c&15])) (Rsh16Ux64 <t> x (MOVHconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVHconst {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v1.AuxInt = int16ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVWconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVWconst [c&31])) (Rsh32Ux64 <t> x (MOVWconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVDconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVBconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVBconst [c&7])) (Rsh8Ux64 <t> x (MOVBconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVBconst {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v1.AuxInt = int8ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
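+// Slicemask must produce all ones for a positive length and zero for a zero
+// length: NOT (SRAI [63] (ADDI [-1] x)) broadcasts the sign bit of x-1 and
+// inverts it, which is exactly that.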
+func rewriteValueRISCV64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpRISCV64NOT)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRAI, t)
+ v0.AuxInt = int64ToAuxInt(63)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, t)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
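+// Store is dispatched on the element size, with 4- and 8-byte floating point
+// values routed to the FP store instructions.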
+func rewriteValueRISCV64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
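+// Zero of a small constant size is unrolled into individual stores whose
+// width follows the type's alignment; 8-byte-aligned multiples of 8 up to
+// 1024 bytes use Duff's device when it is enabled, and everything else falls
+// back to the LoweredZero loop.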
+func rewriteValueRISCV64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVBconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVHconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v0.AuxInt = int8ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVHconst [0]) (MOVHstore ptr (MOVHconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v0.AuxInt = int16ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVBconst [0]) (MOVBstore [2] ptr (MOVBconst [0]) (MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v0.AuxInt = int8ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVHconst [0]) (MOVHstore [4] ptr (MOVHconst [0]) (MOVHstore [2] ptr (MOVHconst [0]) (MOVHstore ptr (MOVHconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v0.AuxInt = int16ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVBconst [0]) (MOVBstore [1] ptr (MOVBconst [0]) (MOVBstore ptr (MOVBconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8)
+ v0.AuxInt = int8ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVHconst [0]) (MOVHstore [2] ptr (MOVHconst [0]) (MOVHstore ptr (MOVHconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16)
+ v0.AuxInt = int16ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpRISCV64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg2(ptr, v1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
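+// rewriteBlockRISCV64 strengthens branches: comparisons of a condition
+// against zero become BEQZ/BNEZ, BEQZ/BNEZ over SEQZ/SNEZ flip or drop the
+// inner op, and BEQZ/BNEZ over SUB/SLT/SLTU fold into the two-register
+// branch forms. A generic If block becomes BNEZ on its boolean control.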
+func rewriteBlockRISCV64(b *Block) bool {
+ switch b.Kind {
+ case BlockRISCV64BEQ:
+ // match: (BEQ (MOVDconst [0]) cond yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ // match: (BEQ cond (MOVDconst [0]) yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ case BlockRISCV64BEQZ:
+ // match: (BEQZ (SEQZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BEQZ (SNEZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BEQZ (SUB x y) yes no)
+ // result: (BEQ x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BEQ, x, y)
+ return true
+ }
+ // match: (BEQZ (SLT x y) yes no)
+ // result: (BGE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGE, x, y)
+ return true
+ }
+ // match: (BEQZ (SLTU x y) yes no)
+ // result: (BGEU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGEU, x, y)
+ return true
+ }
+ case BlockRISCV64BNE:
+ // match: (BNE (MOVDconst [0]) cond yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ // match: (BNE cond (MOVDconst [0]) yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ case BlockRISCV64BNEZ:
+ // match: (BNEZ (SEQZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BNEZ (SNEZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BNEZ (SUB x y) yes no)
+ // result: (BNE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BNE, x, y)
+ return true
+ }
+ // match: (BNEZ (SLT x y) yes no)
+ // result: (BLT x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLT, x, y)
+ return true
+ }
+ // match: (BNEZ (SLTU x y) yes no)
+ // result: (BLTU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLTU, x, y)
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (BNEZ cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
new file mode 100644
index 0000000..a9722b8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -0,0 +1,17859 @@
+// Code generated from gen/S390X.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+import "cmd/internal/obj/s390x"
+
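+// rewriteValueS390X dispatches on the generic op: ops with a direct machine
+// equivalent are lowered by swapping v.Op in place, the rest are handled by
+// the per-op rewrite functions that follow.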
+func rewriteValueS390X(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32F:
+ return rewriteValueS390X_OpAdd32F(v)
+ case OpAdd64:
+ v.Op = OpS390XADD
+ return true
+ case OpAdd64F:
+ return rewriteValueS390X_OpAdd64F(v)
+ case OpAdd8:
+ v.Op = OpS390XADDW
+ return true
+ case OpAddPtr:
+ v.Op = OpS390XADD
+ return true
+ case OpAddr:
+ return rewriteValueS390X_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd32:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd64:
+ v.Op = OpS390XAND
+ return true
+ case OpAnd8:
+ v.Op = OpS390XANDW
+ return true
+ case OpAndB:
+ v.Op = OpS390XANDW
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueS390X_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueS390X_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ v.Op = OpS390XLAN
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueS390X_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueS390X_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueS390X_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueS390X_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueS390X_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueS390X_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueS390X_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueS390X_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValueS390X_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueS390X_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpS390XLAO
+ return true
+ case OpAtomicOr8:
+ return rewriteValueS390X_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueS390X_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueS390X_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueS390X_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueS390X_OpAtomicStorePtrNoWB(v)
+ case OpAtomicStoreRel32:
+ return rewriteValueS390X_OpAtomicStoreRel32(v)
+ case OpAvg64u:
+ return rewriteValueS390X_OpAvg64u(v)
+ case OpBitLen64:
+ return rewriteValueS390X_OpBitLen64(v)
+ case OpBswap32:
+ v.Op = OpS390XMOVWBR
+ return true
+ case OpBswap64:
+ v.Op = OpS390XMOVDBR
+ return true
+ case OpCeil:
+ return rewriteValueS390X_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpS390XCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom32:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom64:
+ v.Op = OpS390XNOT
+ return true
+ case OpCom8:
+ v.Op = OpS390XNOTW
+ return true
+ case OpConst16:
+ return rewriteValueS390X_OpConst16(v)
+ case OpConst32:
+ return rewriteValueS390X_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpS390XFMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValueS390X_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpS390XFMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueS390X_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueS390X_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueS390X_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueS390X_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueS390X_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpS390XCFEBRA
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpS390XCLFEBR
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpS390XCGEBRA
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpS390XLDEBR
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpS390XCLGEBR
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpS390XCELFBR
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpS390XCDLFBR
+ return true
+ case OpCvt32to32F:
+ v.Op = OpS390XCEFBRA
+ return true
+ case OpCvt32to64F:
+ v.Op = OpS390XCDFBRA
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpS390XCFDBRA
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpS390XLEDBR
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpS390XCLFDBR
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpS390XCGDBRA
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpS390XCLGDBR
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpS390XCELGBR
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpS390XCDLGBR
+ return true
+ case OpCvt64to32F:
+ v.Op = OpS390XCEGBRA
+ return true
+ case OpCvt64to64F:
+ v.Op = OpS390XCDGBRA
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueS390X_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueS390X_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueS390X_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpS390XFDIVS
+ return true
+ case OpDiv32u:
+ return rewriteValueS390X_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueS390X_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpS390XFDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpS390XDIVDU
+ return true
+ case OpDiv8:
+ return rewriteValueS390X_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueS390X_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueS390X_OpEq16(v)
+ case OpEq32:
+ return rewriteValueS390X_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueS390X_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueS390X_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueS390X_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueS390X_OpEq8(v)
+ case OpEqB:
+ return rewriteValueS390X_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueS390X_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueS390X_OpFMA(v)
+ case OpFloor:
+ return rewriteValueS390X_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpS390XLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpS390XLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpS390XLoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = OpS390XLoweredGetG
+ return true
+ case OpHmul32:
+ return rewriteValueS390X_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueS390X_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpS390XMULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpS390XMULHDU
+ return true
+ case OpITab:
+ return rewriteValueS390X_OpITab(v)
+ case OpInterCall:
+ v.Op = OpS390XCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueS390X_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueS390X_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueS390X_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueS390X_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueS390X_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueS390X_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueS390X_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueS390X_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueS390X_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueS390X_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueS390X_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueS390X_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueS390X_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueS390X_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueS390X_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueS390X_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueS390X_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueS390X_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueS390X_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueS390X_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueS390X_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueS390X_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueS390X_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueS390X_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueS390X_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueS390X_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueS390X_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueS390X_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueS390X_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueS390X_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueS390X_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueS390X_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueS390X_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueS390X_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueS390X_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueS390X_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueS390X_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueS390X_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueS390X_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueS390X_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueS390X_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueS390X_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueS390X_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueS390X_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueS390X_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueS390X_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpS390XMODDU
+ return true
+ case OpMod8:
+ return rewriteValueS390X_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueS390X_OpMod8u(v)
+ case OpMove:
+ return rewriteValueS390X_OpMove(v)
+ case OpMul16:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32F:
+ v.Op = OpS390XFMULS
+ return true
+ case OpMul64:
+ v.Op = OpS390XMULLD
+ return true
+ case OpMul64F:
+ v.Op = OpS390XFMUL
+ return true
+ case OpMul64uhilo:
+ v.Op = OpS390XMLGR
+ return true
+ case OpMul8:
+ v.Op = OpS390XMULLW
+ return true
+ case OpNeg16:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32F:
+ v.Op = OpS390XFNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpS390XNEG
+ return true
+ case OpNeg64F:
+ v.Op = OpS390XFNEG
+ return true
+ case OpNeg8:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeq16:
+ return rewriteValueS390X_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueS390X_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueS390X_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueS390X_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueS390X_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueS390X_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueS390X_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueS390X_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpS390XLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueS390X_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueS390X_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpS390XORW
+ return true
+ case OpOr32:
+ v.Op = OpS390XORW
+ return true
+ case OpOr64:
+ v.Op = OpS390XOR
+ return true
+ case OpOr8:
+ v.Op = OpS390XORW
+ return true
+ case OpOrB:
+ v.Op = OpS390XORW
+ return true
+ case OpPanicBounds:
+ return rewriteValueS390X_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueS390X_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueS390X_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueS390X_OpPopCount64(v)
+ case OpPopCount8:
+ return rewriteValueS390X_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueS390X_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpS390XRLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpS390XRLLG
+ return true
+ case OpRotateLeft8:
+ return rewriteValueS390X_OpRotateLeft8(v)
+ case OpRound:
+ return rewriteValueS390X_OpRound(v)
+ case OpRound32F:
+ v.Op = OpS390XLoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpS390XLoweredRound64F
+ return true
+ case OpRoundToEven:
+ return rewriteValueS390X_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueS390X_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueS390X_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueS390X_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueS390X_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueS390X_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueS390X_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueS390X_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueS390X_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueS390X_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueS390X_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueS390X_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueS390X_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueS390X_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueS390X_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueS390X_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueS390X_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueS390X_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueS390X_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueS390X_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueS390X_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueS390X_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueS390X_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueS390X_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueS390X_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueS390X_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueS390X_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueS390X_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueS390X_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueS390X_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueS390X_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueS390X_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueS390X_OpRsh8x8(v)
+ case OpS390XADD:
+ return rewriteValueS390X_OpS390XADD(v)
+ case OpS390XADDC:
+ return rewriteValueS390X_OpS390XADDC(v)
+ case OpS390XADDE:
+ return rewriteValueS390X_OpS390XADDE(v)
+ case OpS390XADDW:
+ return rewriteValueS390X_OpS390XADDW(v)
+ case OpS390XADDWconst:
+ return rewriteValueS390X_OpS390XADDWconst(v)
+ case OpS390XADDWload:
+ return rewriteValueS390X_OpS390XADDWload(v)
+ case OpS390XADDconst:
+ return rewriteValueS390X_OpS390XADDconst(v)
+ case OpS390XADDload:
+ return rewriteValueS390X_OpS390XADDload(v)
+ case OpS390XAND:
+ return rewriteValueS390X_OpS390XAND(v)
+ case OpS390XANDW:
+ return rewriteValueS390X_OpS390XANDW(v)
+ case OpS390XANDWconst:
+ return rewriteValueS390X_OpS390XANDWconst(v)
+ case OpS390XANDWload:
+ return rewriteValueS390X_OpS390XANDWload(v)
+ case OpS390XANDconst:
+ return rewriteValueS390X_OpS390XANDconst(v)
+ case OpS390XANDload:
+ return rewriteValueS390X_OpS390XANDload(v)
+ case OpS390XCMP:
+ return rewriteValueS390X_OpS390XCMP(v)
+ case OpS390XCMPU:
+ return rewriteValueS390X_OpS390XCMPU(v)
+ case OpS390XCMPUconst:
+ return rewriteValueS390X_OpS390XCMPUconst(v)
+ case OpS390XCMPW:
+ return rewriteValueS390X_OpS390XCMPW(v)
+ case OpS390XCMPWU:
+ return rewriteValueS390X_OpS390XCMPWU(v)
+ case OpS390XCMPWUconst:
+ return rewriteValueS390X_OpS390XCMPWUconst(v)
+ case OpS390XCMPWconst:
+ return rewriteValueS390X_OpS390XCMPWconst(v)
+ case OpS390XCMPconst:
+ return rewriteValueS390X_OpS390XCMPconst(v)
+ case OpS390XCPSDR:
+ return rewriteValueS390X_OpS390XCPSDR(v)
+ case OpS390XFCMP:
+ return rewriteValueS390X_OpS390XFCMP(v)
+ case OpS390XFCMPS:
+ return rewriteValueS390X_OpS390XFCMPS(v)
+ case OpS390XFMOVDload:
+ return rewriteValueS390X_OpS390XFMOVDload(v)
+ case OpS390XFMOVDstore:
+ return rewriteValueS390X_OpS390XFMOVDstore(v)
+ case OpS390XFMOVSload:
+ return rewriteValueS390X_OpS390XFMOVSload(v)
+ case OpS390XFMOVSstore:
+ return rewriteValueS390X_OpS390XFMOVSstore(v)
+ case OpS390XFNEG:
+ return rewriteValueS390X_OpS390XFNEG(v)
+ case OpS390XFNEGS:
+ return rewriteValueS390X_OpS390XFNEGS(v)
+ case OpS390XLDGR:
+ return rewriteValueS390X_OpS390XLDGR(v)
+ case OpS390XLEDBR:
+ return rewriteValueS390X_OpS390XLEDBR(v)
+ case OpS390XLGDR:
+ return rewriteValueS390X_OpS390XLGDR(v)
+ case OpS390XLOCGR:
+ return rewriteValueS390X_OpS390XLOCGR(v)
+ case OpS390XLTDBR:
+ return rewriteValueS390X_OpS390XLTDBR(v)
+ case OpS390XLTEBR:
+ return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredRound32F:
+ return rewriteValueS390X_OpS390XLoweredRound32F(v)
+ case OpS390XLoweredRound64F:
+ return rewriteValueS390X_OpS390XLoweredRound64F(v)
+ case OpS390XMOVBZload:
+ return rewriteValueS390X_OpS390XMOVBZload(v)
+ case OpS390XMOVBZreg:
+ return rewriteValueS390X_OpS390XMOVBZreg(v)
+ case OpS390XMOVBload:
+ return rewriteValueS390X_OpS390XMOVBload(v)
+ case OpS390XMOVBreg:
+ return rewriteValueS390X_OpS390XMOVBreg(v)
+ case OpS390XMOVBstore:
+ return rewriteValueS390X_OpS390XMOVBstore(v)
+ case OpS390XMOVBstoreconst:
+ return rewriteValueS390X_OpS390XMOVBstoreconst(v)
+ case OpS390XMOVDaddridx:
+ return rewriteValueS390X_OpS390XMOVDaddridx(v)
+ case OpS390XMOVDload:
+ return rewriteValueS390X_OpS390XMOVDload(v)
+ case OpS390XMOVDstore:
+ return rewriteValueS390X_OpS390XMOVDstore(v)
+ case OpS390XMOVDstoreconst:
+ return rewriteValueS390X_OpS390XMOVDstoreconst(v)
+ case OpS390XMOVHBRstore:
+ return rewriteValueS390X_OpS390XMOVHBRstore(v)
+ case OpS390XMOVHZload:
+ return rewriteValueS390X_OpS390XMOVHZload(v)
+ case OpS390XMOVHZreg:
+ return rewriteValueS390X_OpS390XMOVHZreg(v)
+ case OpS390XMOVHload:
+ return rewriteValueS390X_OpS390XMOVHload(v)
+ case OpS390XMOVHreg:
+ return rewriteValueS390X_OpS390XMOVHreg(v)
+ case OpS390XMOVHstore:
+ return rewriteValueS390X_OpS390XMOVHstore(v)
+ case OpS390XMOVHstoreconst:
+ return rewriteValueS390X_OpS390XMOVHstoreconst(v)
+ case OpS390XMOVWBRstore:
+ return rewriteValueS390X_OpS390XMOVWBRstore(v)
+ case OpS390XMOVWZload:
+ return rewriteValueS390X_OpS390XMOVWZload(v)
+ case OpS390XMOVWZreg:
+ return rewriteValueS390X_OpS390XMOVWZreg(v)
+ case OpS390XMOVWload:
+ return rewriteValueS390X_OpS390XMOVWload(v)
+ case OpS390XMOVWreg:
+ return rewriteValueS390X_OpS390XMOVWreg(v)
+ case OpS390XMOVWstore:
+ return rewriteValueS390X_OpS390XMOVWstore(v)
+ case OpS390XMOVWstoreconst:
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v)
+ case OpS390XMULLD:
+ return rewriteValueS390X_OpS390XMULLD(v)
+ case OpS390XMULLDconst:
+ return rewriteValueS390X_OpS390XMULLDconst(v)
+ case OpS390XMULLDload:
+ return rewriteValueS390X_OpS390XMULLDload(v)
+ case OpS390XMULLW:
+ return rewriteValueS390X_OpS390XMULLW(v)
+ case OpS390XMULLWconst:
+ return rewriteValueS390X_OpS390XMULLWconst(v)
+ case OpS390XMULLWload:
+ return rewriteValueS390X_OpS390XMULLWload(v)
+ case OpS390XNEG:
+ return rewriteValueS390X_OpS390XNEG(v)
+ case OpS390XNEGW:
+ return rewriteValueS390X_OpS390XNEGW(v)
+ case OpS390XNOT:
+ return rewriteValueS390X_OpS390XNOT(v)
+ case OpS390XNOTW:
+ return rewriteValueS390X_OpS390XNOTW(v)
+ case OpS390XOR:
+ return rewriteValueS390X_OpS390XOR(v)
+ case OpS390XORW:
+ return rewriteValueS390X_OpS390XORW(v)
+ case OpS390XORWconst:
+ return rewriteValueS390X_OpS390XORWconst(v)
+ case OpS390XORWload:
+ return rewriteValueS390X_OpS390XORWload(v)
+ case OpS390XORconst:
+ return rewriteValueS390X_OpS390XORconst(v)
+ case OpS390XORload:
+ return rewriteValueS390X_OpS390XORload(v)
+ case OpS390XRISBGZ:
+ return rewriteValueS390X_OpS390XRISBGZ(v)
+ case OpS390XRLL:
+ return rewriteValueS390X_OpS390XRLL(v)
+ case OpS390XRLLG:
+ return rewriteValueS390X_OpS390XRLLG(v)
+ case OpS390XSLD:
+ return rewriteValueS390X_OpS390XSLD(v)
+ case OpS390XSLDconst:
+ return rewriteValueS390X_OpS390XSLDconst(v)
+ case OpS390XSLW:
+ return rewriteValueS390X_OpS390XSLW(v)
+ case OpS390XSLWconst:
+ return rewriteValueS390X_OpS390XSLWconst(v)
+ case OpS390XSRAD:
+ return rewriteValueS390X_OpS390XSRAD(v)
+ case OpS390XSRADconst:
+ return rewriteValueS390X_OpS390XSRADconst(v)
+ case OpS390XSRAW:
+ return rewriteValueS390X_OpS390XSRAW(v)
+ case OpS390XSRAWconst:
+ return rewriteValueS390X_OpS390XSRAWconst(v)
+ case OpS390XSRD:
+ return rewriteValueS390X_OpS390XSRD(v)
+ case OpS390XSRDconst:
+ return rewriteValueS390X_OpS390XSRDconst(v)
+ case OpS390XSRW:
+ return rewriteValueS390X_OpS390XSRW(v)
+ case OpS390XSRWconst:
+ return rewriteValueS390X_OpS390XSRWconst(v)
+ case OpS390XSTM2:
+ return rewriteValueS390X_OpS390XSTM2(v)
+ case OpS390XSTMG2:
+ return rewriteValueS390X_OpS390XSTMG2(v)
+ case OpS390XSUB:
+ return rewriteValueS390X_OpS390XSUB(v)
+ case OpS390XSUBE:
+ return rewriteValueS390X_OpS390XSUBE(v)
+ case OpS390XSUBW:
+ return rewriteValueS390X_OpS390XSUBW(v)
+ case OpS390XSUBWconst:
+ return rewriteValueS390X_OpS390XSUBWconst(v)
+ case OpS390XSUBWload:
+ return rewriteValueS390X_OpS390XSUBWload(v)
+ case OpS390XSUBconst:
+ return rewriteValueS390X_OpS390XSUBconst(v)
+ case OpS390XSUBload:
+ return rewriteValueS390X_OpS390XSUBload(v)
+ case OpS390XSumBytes2:
+ return rewriteValueS390X_OpS390XSumBytes2(v)
+ case OpS390XSumBytes4:
+ return rewriteValueS390X_OpS390XSumBytes4(v)
+ case OpS390XSumBytes8:
+ return rewriteValueS390X_OpS390XSumBytes8(v)
+ case OpS390XXOR:
+ return rewriteValueS390X_OpS390XXOR(v)
+ case OpS390XXORW:
+ return rewriteValueS390X_OpS390XXORW(v)
+ case OpS390XXORWconst:
+ return rewriteValueS390X_OpS390XXORWconst(v)
+ case OpS390XXORWload:
+ return rewriteValueS390X_OpS390XXORWload(v)
+ case OpS390XXORconst:
+ return rewriteValueS390X_OpS390XXORconst(v)
+ case OpS390XXORload:
+ return rewriteValueS390X_OpS390XXORload(v)
+ case OpSelect0:
+ return rewriteValueS390X_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueS390X_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpS390XMOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueS390X_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpS390XFSQRT
+ return true
+ case OpStaticCall:
+ v.Op = OpS390XCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueS390X_OpStore(v)
+ case OpSub16:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32F:
+ return rewriteValueS390X_OpSub32F(v)
+ case OpSub64:
+ v.Op = OpS390XSUB
+ return true
+ case OpSub64F:
+ return rewriteValueS390X_OpSub64F(v)
+ case OpSub8:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSubPtr:
+ v.Op = OpS390XSUB
+ return true
+ case OpTrunc:
+ return rewriteValueS390X_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpS390XLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor32:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor64:
+ v.Op = OpS390XXOR
+ return true
+ case OpXor8:
+ v.Op = OpS390XXORW
+ return true
+ case OpZero:
+ return rewriteValueS390X_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpS390XMOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpS390XMOVBZreg
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add32F x y)
+ // result: (Select0 (FADDS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADDS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64F x y)
+ // result: (Select0 (FADD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADD, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (LAA ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (LAAG ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (LANfloor ptr (RLL <typ.UInt32> (ORWconst <typ.UInt32> val [-1<<8]) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLANfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(-1 << 8)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas32)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas64)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (LoweredAtomicExchange32 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (LoweredAtomicExchange64 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange64)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (LAOfloor ptr (SLW <typ.UInt32> (MOVBZreg <typ.UInt32> val) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLAOfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore32 ptr val mem)
+ // result: (SYNC (MOVWatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore64 ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore8 ptr val mem)
+ // result: (SYNC (MOVBatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (MOVWatomicstore ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWatomicstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t)
+ v0.AuxInt = uint8ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpS390XSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (FLOGR x))
+ for {
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (FIDBR [6] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(6)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstBool(v *Value) bool {
+ // match: (ConstBool [b])
+ // result: (MOVDconst [b2i(b)])
+ for {
+ b := auxIntToBool(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(b))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueS390X_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XANDW, t)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpS390XNOTW, t)
+ v5.AddArg(x)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XAND, t)
+ v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
+ v3.AuxInt = int32ToAuxInt(1)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XNOT, t)
+ v4.AddArg(x)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (DIVW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (DIVW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (DIVWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y)
+ // result: (DIVD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpS390XFMADD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (FIDBR [7] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ITab (Load ptr mem))
+ // result: (MOVDload ptr mem)
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsInBounds idx len)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+ for {
+ p := v_0
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg(p)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && isSigned(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !isSigned(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && isSigned(t)
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !isSigned(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || (is8BitInt(t) && !isSigned(t)))
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (MODWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (MODW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (MODWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MODD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] dst src mem)
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 0 && s <= 256 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff32(int32(s), 0)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 0 && s <= 256 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s), 0))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 256 && s <= 512 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff32(int32(s)-256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 256 && s <= 512 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-256, 256))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0))
+ v0.AddArg3(dst, src, mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 512 && s <= 768 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff32(int32(s)-512, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 512 && s <= 768 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-512, 512))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 256))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0))
+ v1.AddArg3(dst, src, mem)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 768 && s <= 1024 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff32(int32(s)-768, 768)] dst src (MVC [makeValAndOff32(256, 512)] dst src (MVC [makeValAndOff32(256, 256)] dst src (MVC [makeValAndOff32(256, 0)] dst src mem))))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 768 && s <= 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s)-768, 768))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 512))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 256))
+ v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(256, 0))
+ v2.AddArg3(dst, src, mem)
+ v1.AddArg3(dst, src, v2)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 1024 && logLargeCopy(v, s)
+ // result: (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XLoweredMove)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt((s / 256) * 256)
+ v0.AddArg2(src, v1)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqB x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORWconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueS390X_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 x)
+ // result: (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNT (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpS390XPOPCNT)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRound(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round x)
+ // result: (FIDBR [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (FIDBR [4] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(4)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
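
The signed Rsh8x* lowerings clamp an oversized count to 63 instead of selecting zero, matching Go's arithmetic-shift semantics where a large count leaves only copies of the sign bit. A small illustrative sketch (the helper is hypothetical):

package main

import "fmt"

// rsh8S shows the semantics the clamp-to-63 lowering preserves: shifting a
// signed value by an oversized count behaves like shifting by width-1.
func rsh8S(x int8, y uint) int8 {
	if y >= 8 {
		y = 7
	}
	return x >> y
}

func main() {
	fmt.Println(rsh8S(-128, 2))   // -32
	fmt.Println(rsh8S(-128, 100)) // -1
	fmt.Println(rsh8S(64, 100))   // 0
}
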
+func rewriteValueS390X_OpS390XADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD idx (MOVDaddr [c] {s} ptr))
+ // cond: ptr.Op != OpSB
+ // result: (MOVDaddridx [c] {s} ptr idx)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ idx := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ if !(ptr.Op != OpSB) {
+ continue
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, idx)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
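
The two-iteration `for _i0 ...` loops above are how the generated matchers handle commutative ops: the body runs once for each operand order, so a rule written with the constant on the right also fires when the constant appears on the left. A stripped-down illustration of the same idiom (the val type and matchAddConst are made up for this sketch):

package main

import "fmt"

type val struct {
	op  string
	aux int64
}

// matchAddConst tries both operand orders, mirroring the swap loop emitted
// for commutative rules such as (ADD x (MOVDconst [c])).
func matchAddConst(v0, v1 *val) (int64, bool) {
	for i := 0; i <= 1; i, v0, v1 = i+1, v1, v0 {
		if v1.op != "MOVDconst" {
			continue
		}
		return v1.aux, true
	}
	return 0, false
}

func main() {
	x := &val{op: "Arg"}
	c := &val{op: "MOVDconst", aux: 7}
	fmt.Println(matchAddConst(x, c)) // 7 true
	fmt.Println(matchAddConst(c, x)) // 7 true
}
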
+func rewriteValueS390X_OpS390XADDC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDC x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (ADDCconst x [int16(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XADDCconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDE x y (FlagEQ))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagEQ {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (FlagLT))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagLT {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ // result: (ADDE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XADDCconst || auxIntToInt16(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpS390XADDE {
+ break
+ }
+ c := v_2_0_0_0.Args[2]
+ v_2_0_0_0_0 := v_2_0_0_0.Args[0]
+ if v_2_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_0_0_1 := v_2_0_0_0.Args[1]
+ if v_2_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XADDE)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDW x (MOVDconst [c]))
+ // result: (ADDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDW x (NEGW y))
+ // result: (SUBW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEGW {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUBW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDWconst [c] (ADDWconst [d] x))
+ // result: (ADDWconst [int32(c+d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
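
The last ADDWconst rule above merges two stacked add-constants into one with a plain int32 addition and no overflow guard. That is sound because 32-bit addition is associative modulo 2^32, so wrap-around in c+d does not change the final result; a quick check of the identity:

package main

import "fmt"

func main() {
	x := int32(10)
	c, d := int32(0x7fffffff), int32(1)
	// Folding two 32-bit add-constants is safe even when c+d wraps,
	// because 32-bit addition is associative modulo 2^32.
	fmt.Println((x+d)+c == x+(c+d)) // true
}
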
+func rewriteValueS390X_OpS390XADDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB)))
+ // cond: ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x))
+ // cond: x.Op != OpSB && is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(x.Op != OpSB && is20Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddridx [d] {s} x y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (ADD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND x (MOVDconst [c]))
+ // cond: s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ // result: (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil) {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c < 0
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c < 0) {
+ continue
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c >= 0) {
+ continue
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
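
The third AND rule above turns a 64-bit AND with a non-negative 32-bit constant into a 32-bit ANDW followed by a zero extension; that works because such a mask already clears the upper 32 bits of the result. A quick check of the identity being relied on:

package main

import "fmt"

func main() {
	x := uint64(0xfedcba9876543210)
	c := uint64(0x0f0f0f0f) // non-negative 32-bit mask: upper word of x&c is zero
	fmt.Println(x&c == uint64(uint32(x)&uint32(c))) // true
}
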
+func rewriteValueS390X_OpS390XANDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDW x (MOVDconst [c]))
+ // result: (ANDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDWconst [c] (ANDWconst [d] x))
+ // result: (ANDWconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0x00ff] x)
+ // result: (MOVBZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0x00ff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0xffff] x)
+ // result: (MOVHZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xffff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDWconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
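
The 0x00ff and 0xffff cases above rely on masking being equivalent to zero extension of the low 8 or 16 bits, so the AND can be expressed as MOVBZreg/MOVHZreg, which the extension rules elsewhere in this file can then simplify further. The equivalence, spelled out:

package main

import "fmt"

func main() {
	x := int32(-123456)
	fmt.Println(x&0x00ff == int32(uint8(x)))  // true: mask == zero-extend byte
	fmt.Println(x&0xffff == int32(uint16(x))) // true: mask == zero-extend halfword
}
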
+func rewriteValueS390X_OpS390XANDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (AND x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (CMPUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) x)
+ // cond: isU32Bit(c)
+ // result: (InvertFlags (CMPUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)==uint64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) == uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPUconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (RISBGZ x {r}) [c])
+ // cond: r.OutMask() < uint64(uint32(c))
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(r.OutMask() < uint64(uint32(c))) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
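
When the operand of CMPUconst is itself a constant, the rules above fold the comparison straight into a flag value (FlagEQ, FlagLT, FlagGT). A compact sketch of that three-way folding (the flag type and helper are illustrative only):

package main

import "fmt"

type flag int

const (
	flagEQ flag = iota
	flagLT
	flagGT
)

// cmpUconst collapses an unsigned compare against a known constant into one
// of three flag values, as the generated rules do.
func cmpUconst(x uint64, y uint32) flag {
	switch {
	case x == uint64(y):
		return flagEQ
	case x < uint64(y):
		return flagLT
	default:
		return flagGT
	}
}

func main() {
	fmt.Println(cmpUconst(5, 9), cmpUconst(9, 9), cmpUconst(12, 9)) // 1 0 2
}
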
+func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVWZreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWZreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVDconst [c]))
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: x.ID > y.ID
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(x.ID > y.ID) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x (MOVWreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)==uint32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) == uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWUconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (ANDWconst _ [m]) [n])
+ // cond: uint32(m) < uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVWreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (ANDWconst _ [m]) [n])
+ // cond: int32(m) >= 0 && int32(m) < int32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(int32(m) >= 0 && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst x:(SRWconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPWUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWZreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<int64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>int64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (RISBGZ x {r}) [c])
+ // cond: c > 0 && r.OutMask() < uint64(c)
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(c > 0 && r.OutMask() < uint64(c)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(SRDconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
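
Several CMPconst rules above narrow a 64-bit compare to CMPWconst when the operand is a sign or zero extension: a value known to fit in 32 bits compares the same way at either width against a 32-bit constant. A small check of that claim:

package main

import "fmt"

func main() {
	x := int16(-42)
	c := int32(7)
	// A sign-extended value compares identically at 64-bit and 32-bit width,
	// which is why CMPconst on an extension can be narrowed to CMPWconst.
	fmt.Println((int64(x) < int64(c)) == (int32(x) < c)) // true
}
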
+func rewriteValueS390X_OpS390XCPSDR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: !math.Signbit(c)
+ // result: (LPDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: math.Signbit(c)
+ // result: (LNDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLNDFR)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
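
CPSDR combines the magnitude of its first operand with the sign of its second, so when the sign source is a floating-point constant the rules above reduce it to a plain load-positive (LPDFR) or load-negative (LNDFR). This reads like math.Copysign with a constant sign argument; a small sketch of the equivalent Go-level behaviour (my reading of the instruction, not taken from the generated file):

package main

import (
	"fmt"
	"math"
)

func main() {
	y := -3.5
	fmt.Println(math.Copysign(y, 2.0))  // 3.5: sign known non-negative, force |y|
	fmt.Println(math.Copysign(y, -2.0)) // -3.5: sign known negative, force -|y|
}
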
+func rewriteValueS390X_OpS390XFCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMP x (FMOVDconst [0.0]))
+ // result: (LTDBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTDBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMP (FMOVDconst [0.0]) x)
+ // result: (InvertFlags (LTDBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTDBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0.0]))
+ // result: (LTEBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVSconst || auxIntToFloat32(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTEBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0.0]) x)
+ // result: (InvertFlags (LTEBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVSconst || auxIntToFloat32(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTEBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LDGR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLDGR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEGS (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLDGR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LDGR <t> (RISBGZ x {r}))
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LPDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LDGR <t> (OR (MOVDconst [-1<<63]) x))
+ // result: (LNDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XOR {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != -1<<63 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpS390XLNDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XORload {
+ break
+ }
+ t1 := x.Type
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst || auxIntToInt64(x_0.AuxInt) != -1<<63 {
+ break
+ }
+ ptr := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpS390XLDGR, t)
+ v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1)
+ v2.AuxInt = int32ToAuxInt(off)
+ v2.Aux = symToAux(sym)
+ v2.AddArg2(ptr, mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (LDGR (LGDR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLEDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEDBR (LPDFR (LDEBR x)))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEDBR (LNDFR (LDEBR x)))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLGDR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LGDR (LDGR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLDGR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLOCGR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LOCGR {c} x y (InvertFlags cmp))
+ // result: (LOCGR {c.ReverseComparison()} x y cmp)
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagEQ))
+ // cond: c&s390x.Equal != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagLT))
+ // cond: c&s390x.Less != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagGT))
+ // cond: c&s390x.Greater != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagOV))
+ // cond: c&s390x.Unordered != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagEQ))
+ // cond: c&s390x.Equal == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagLT))
+ // cond: c&s390x.Less == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagGT))
+ // cond: c&s390x.Greater == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagOV))
+ // cond: c&s390x.Unordered == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTDBR (Select0 x:(FADD _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADD || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTDBR (Select0 x:(FSUB _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUB || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTEBR (Select0 x:(FADDS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADDS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTEBR (Select0 x:(FSUBS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUBS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound32F x:(FMOVSconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVSconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound64F x:(FMOVDconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVDconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg <t> x:(MOVBload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( uint8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ // cond: int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XLOCGR {
+ break
+ }
+ _ = x.Args[1]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(x_0.AuxInt)
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(x_1.AuxInt)
+ if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x000000ff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( int8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg (ANDWconst [m] x))
+ // cond: int8(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int8(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is20Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVBstoreconst [makeValAndOff32(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is20Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRWconst || auxIntToUint8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: is20Bit(sc.Off()+int64(off))
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(sc.Off() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVHstoreconst [makeValAndOff32(c.Val32()&0xff | a.Val32()<<8, a.Off32())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c.Val32()&0xff|a.Val32()<<8, a.Off32()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y)
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [c] {s} x (ADDconst [d] y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LGDR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVDstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STMG2 [i-8] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVDstore || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG2)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG3)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)
+ // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG3 || auxIntToInt32(x.AuxInt) != i-24 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 24)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off()+int64(off))
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRWconst || auxIntToUint8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg <t> x:(MOVHload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x0000ffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg (ANDWconst [m] x))
+ // cond: int16(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int16(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVHstoreconst [makeValAndOff32(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off()+int64(off))
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVWstore [a.Off32()] {s} p (MOVDconst [int64(c.Val32()&0xffff | a.Val32()<<16)]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVHstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(c.Val32()&0xffff | a.Val32()<<16))
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDBRstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDBRstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
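+	// MOVWZreg zero-extends a 32-bit value to 64 bits. The rules below either
+	// peel redundant inner extensions or drop the MOVWZreg entirely when the
+	// operand is already known to have no bits set above bit 31.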
+ // match: (MOVWZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 4)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg <t> x:(MOVWload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0xffffffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVWstoreconst [makeValAndOff32(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVDstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVDstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+32 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
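+	// The next three rules fold chains of adjacent 32-bit stores into
+	// store-multiple instructions, growing STM2 to STM3 to STM4 one word at a time.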
+ // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)
+ // result: (STM2 [i-4] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM2)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STM3 [i-8] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM3)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)
+ // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM3 || auxIntToInt32(x.AuxInt) != i-12 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 12)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off()+int64(off))
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
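+	// Two constant stores to adjacent words are combined into one MOVDstore of
+	// a single 64-bit constant; the value at the lower offset ends up in the
+	// high half, matching the big-endian layout.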
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVDstore [a.Off32()] {s} p (MOVDconst [c.Val()&0xffffffff | a.Val()<<32]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(a.Off32())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c.Val()&0xffffffff | a.Val()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
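+		// MULLD is commutative: the inner loop tries both operand orders by
+		// swapping v_0 and v_1 on its second iteration.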
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XMULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
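+	// Strength reduction: multiplication by a constant that is the sum of two
+	// powers of two, or a power of two minus a power of two (including the
+	// negated forms), becomes shifts combined with ADD or SUB.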
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADD (SLDconst <t> x [uint8(log32(c&(c-1)))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLDconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MULLD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMULLD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADDW (SLWconst <t> x [uint8(log32(c&(c-1)))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLWconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c*int32(d))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c * int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG (ADDconst [c] (NEG x)))
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XNEG {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEGW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGW (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(-c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(-c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNOT(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NOT x)
+ // result: (XOR (MOVDconst [-1]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XNOTW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTW x)
+ // result: (XORWconst [-1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [-1<<63]) (LGDR <t> x))
+ // result: (LGDR <t> (LNDFR <x.Type> x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0.AuxInt) != -1<<63 || v_1.Op != OpS390XLGDR {
+ continue
+ }
+ t := v_1.Type
+ x := v_1.Args[0]
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ // cond: r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <t> y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XLGDR {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpS390XLPDFR {
+ continue
+ }
+ t := v_1_0.Type
+ y := v_1_0.Args[0]
+ if !(r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ // cond: c >= 0 && r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c >= 0 && r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type)
+ v1.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
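+	// The remaining OR rules combine adjacent zero-extended loads (optionally
+	// shifted) into a single wider load, in both native and byte-reversed (BR)
+	// forms; mergePoint picks the block for the combined load and clobber
+	// marks the superseded values as dead.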
+ // match: (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR x1:(MOVHZload [i1] {s} p mem) sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR x1:(MOVWZload [i1] {s} p mem) sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
+ // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVWZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 32 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVWZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVWZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVWBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 32 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVWZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVWBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y))
+ // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORW x (MOVDconst [c]))
+ // result: (ORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
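+	// An ORW of matching left and right shifts by c and 32-c reconstructs a
+	// 32-bit rotate, so it is matched directly into RLLconst (rotate left by
+	// a constant).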
+ // match: (ORW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW x1:(MOVBZload [i1] {s} p mem) sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
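+	// The rule above merges two adjacent zero-extended byte loads combined
+	// in big-endian order (the byte at the lower address lands in the upper
+	// bits), roughly
+	//
+	//	v := uint32(b[i+1]) | uint32(b[i])<<8
+	//
+	// into a single zero-extending halfword load at offset i; the next rule
+	// applies the same idea to a pair of halfword loads.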
+ // match: (ORW x1:(MOVHZload [i1] {s} p mem) sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLWconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XORW {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLWconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORW x0:(MOVBZload [i0] {s} p mem) sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y))
+ // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLWconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XORW {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLWconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORWconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (OR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
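+	// When the memory operand of the ORload was produced by a floating-point
+	// store to the same address, the load is forwarded from the stored value:
+	// LGDR moves the raw bits from the floating-point register straight into
+	// a general register, avoiding the round trip through memory.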
+ // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRISBGZ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RISBGZ (MOVWZreg x) {r})
+ // cond: r.InMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0xffffffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVHZreg x) {r})
+ // cond: r.InMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x0000ffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVBZreg x) {r})
+ // cond: r.InMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x000000ff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SLDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)<<c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)<<c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) << c)).RotateLeft(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)>>c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)>>c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) >> c)).RotateLeft(-c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (RISBGZ x {y}) {z})
+ // cond: z.InMerge(y.OutMask()) != nil
+ // result: (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+ for {
+ z := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ y := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(z.InMerge(y.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*z.InMerge(y.OutMask())).RotateLeft(y.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.End == 63 && r.Start == -r.Amount&63
+ // result: (SRDconst x [-r.Amount&63])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.End == 63 && r.Start == -r.Amount&63) {
+ break
+ }
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(-r.Amount & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.Start == 0 && r.End == 63-r.Amount
+ // result: (SLDconst x [r.Amount])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.Start == 0 && r.End == 63-r.Amount) {
+ break
+ }
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(r.Amount)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRADconst x [c]) {r})
+ // cond: r.Start == r.End && (r.Start+r.Amount)&63 <= c
+ // result: (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRADconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.Start == r.End && (r.Start+r.Amount)&63 <= c) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(r.Start, r.Start, -r.Start&63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(56, 63, 0)
+ // result: (MOVBZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(56, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(48, 63, 0)
+ // result: (MOVHZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(48, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(32, 63, 0)
+ // result: (MOVWZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(32, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
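+	// s390x.NewRotateParams(start, end, amount) describes a rotate left by
+	// amount followed by clearing every bit outside positions start..end,
+	// with bits numbered 0 (most significant) through 63 (least significant).
+	// The three rules above recognize windows that coincide with ordinary
+	// zero extensions: bits 56..63 are the low byte (MOVBZreg), 48..63 the
+	// low halfword (MOVHZreg) and 32..63 the low word (MOVWZreg).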
+ // match: (RISBGZ (LGDR <t> x) {r})
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LGDR <t> (LPDFR <x.Type> x))
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ t := v_0.Type
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLL x (MOVDconst [c]))
+ // result: (RLLconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLLG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLLG x (MOVDconst [c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, uint8(c&63)))
+ v.AddArg(x)
+ return true
+ }
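+	// A 64-bit rotate by a constant keeps every bit, so it is expressed as a
+	// RISBGZ with the full 0..63 window and the rotate amount, roughly
+	// RLLG x, 13  =>  RISBGZ x {start: 0, end: 63, amount: 13}.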
+ return false
+}
+func rewriteValueS390X_OpS390XSLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (AND (MOVDconst [c]) y))
+ // result: (SLD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
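+	// The run of rules above drops sign and zero extensions of the shift
+	// count: a 64-bit shift only consults the low 6 bits of its second
+	// operand, and none of MOVWreg..MOVBZreg change those bits.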
+ return false
+}
+func rewriteValueS390X_OpS390XSLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst (SRDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63)))
+ v.AddArg(x)
+ return true
+ }
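+	// A right shift followed by a left shift is a rotate combined with a
+	// mask, which RISBGZ encodes directly. For example, with c == d the pair
+	// (x >> c) << c just clears the low c bits, and the parameters computed
+	// above reduce to NewRotateParams(0, 63-c, 0): keep bits 0..63-c, no
+	// rotation.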
+ // match: (SLDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SLWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSLWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (AND (MOVDconst [c]) y))
+ // result: (SLW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (AND (MOVDconst [c]) y))
+ // result: (SRAD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRADconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRADconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRADconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRAWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (SRAWconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (AND (MOVDconst [c]) y))
+ // result: (SRAW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRAWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (AND (MOVDconst [c]) y))
+ // result: (SRD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRDconst (SLDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (AND (MOVDconst [c]) y))
+ // result: (SRW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
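+	// Two adjacent 2-register store-multiple instructions are fused into a
+	// single STM4: the inner STM2 writes w0 and w1 at offset i-8, the outer
+	// writes w2 and w3 at offset i, so one STM4 at i-8 stores all four
+	// registers contiguously.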
+ // match: (STM2 [i] {s} p (SRDconst [32] x) x mem)
+ // result: (MOVDstore [i] {s} p x mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ x := v_1.Args[0]
+ if x != v_2 {
+ break
+ }
+ mem := v_3
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEG (SUBconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBE x y (FlagGT))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagGT {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (FlagOV))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagOV {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ // result: (SUBE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_2_0.Args[1]
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1 := v_2_0.Args[1]
+ if v_2_0_1.Op != OpS390XNEG {
+ break
+ }
+ v_2_0_1_0 := v_2_0_1.Args[0]
+ if v_2_0_1_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_1_0_0 := v_2_0_1_0.Args[0]
+ if v_2_0_1_0_0.Op != OpS390XSUBE {
+ break
+ }
+ c := v_2_0_1_0_0.Args[2]
+ v_2_0_1_0_0_0 := v_2_0_1_0_0.Args[0]
+ if v_2_0_1_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1_0_0_1 := v_2_0_1_0_0.Args[1]
+ if v_2_0_1_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XSUBE)
+ v.AddArg3(x, y, c)
+ return true
+ }
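+	// Roughly, the pattern above is a borrow flag that was materialized into
+	// a register (SUBE 0 0 c followed by NEG) and then converted back into a
+	// flag by SUBC; the round trip is dropped and the original borrow c is
+	// consumed directly.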
+ return false
+}
+func rewriteValueS390X_OpS390XSUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBW x (MOVDconst [c]))
+ // result: (SUBWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSUBWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [c]) x)
+ // result: (NEGW (SUBWconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XNEGW)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBWconst [c] x)
+ // cond: int32(c) == 0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBWconst [c] x)
+ // result: (ADDWconst [-int32(c)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(-int32(c))
+ v.AddArg(x)
+ return true
+ }
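+	// In 32-bit arithmetic subtracting a constant equals adding its negation
+	// modulo 2^32, so SUBWconst is always rewritten to ADDWconst; unlike the
+	// 64-bit SUBconst rule further down, no overflow guard on -c is needed.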
+}
+func rewriteValueS390X_OpS390XSUBWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst (MOVDconst [d]) [c])
+ // result: (MOVDconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBconst (SUBconst x [d]) [c])
+ // cond: is32Bit(-int64(c)-int64(d))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-int64(c) - int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (SUB x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes2 x)
+ // result: (ADDW (SRWconst <typ.UInt8> x [8]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8)
+ v0.AuxInt = uint8ToAuxInt(8)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes4 x)
+ // result: (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes2)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16)
+ v1.AuxInt = uint8ToAuxInt(16)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes8 x)
+ // result: (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes4)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32)
+ v1.AuxInt = uint8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XXORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORW x (MOVDconst [c]))
+ // result: (XORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (XOR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int16ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v2.AddArg2(v3, c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDW val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADD val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 (ADDCconst (MOVDconst [c]) [d]))
+ // result: (MOVDconst [c+int64(d)])
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c + int64(d))
+ return true
+ }
+ // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // result: (MOVDconst [c-d])
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Select0 (FADD (FMUL y z) x))
+ // result: (FMADD x y z)
+ for {
+ if v_0.Op != OpS390XFADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMUL {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ v.reset(OpS390XFMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUB (FMUL y z) x))
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpS390XFSUB {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMUL {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ v.reset(OpS390XFMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (Select0 (FADDS (FMULS y z) x))
+ // result: (FMADDS x y z)
+ for {
+ if v_0.Op != OpS390XFADDS {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMULS {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ v.reset(OpS390XFMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUBS (FMULS y z) x))
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpS390XFSUBS {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMULS {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ v.reset(OpS390XFMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v5 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5.AuxInt = int16ToAuxInt(-1)
+ v5.AddArg(c)
+ v4.AddArg(v5)
+ v3.AddArg3(x, y, v4)
+ v2.AddArg(v3)
+ v0.AddArg3(v1, v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v6 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v6.AddArg2(v2, c)
+ v5.AddArg(v6)
+ v4.AddArg3(x, y, v5)
+ v3.AddArg(v4)
+ v1.AddArg3(v2, v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ // result: (FlagLT)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d == 0
+ // result: (FlagGT)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d == 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d != 0
+ // result: (FlagOV)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d != 0) {
+ break
+ }
+ v.reset(OpS390XFlagOV)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpS390XNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub32F x y)
+ // result: (Select0 (FSUBS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUBS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64F x y)
+ // result: (Select0 (FSUB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUB, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (FIDBR [5] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(5)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVDstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVHstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstoreconst [makeValAndOff32(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff32(0,3)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 0 && s <= 1024
+ // result: (CLEAR [makeValAndOff32(int32(s), 0)] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 0 && s <= 1024) {
+ break
+ }
+ v.reset(OpS390XCLEAR)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(s), 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 1024
+ // result: (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 1024) {
+ break
+ }
+ v.reset(OpS390XLoweredZero)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type)
+ v0.AuxInt = int32ToAuxInt((int32(s) / 256) * 256)
+ v0.AddArg(destptr)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockS390X(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockS390XBRC:
+ // match: (BRC {c} x:(CMP _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPW _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMP x y) yes no)
+ // result: (CGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPW x y) yes no)
+ // result: (CRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPU x y) yes no)
+ // result: (CLGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWU x y) yes no)
+ // result: (CLRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (InvertFlags cmp) yes no)
+ // result: (BRC {c.ReverseComparison()} cmp yes no)
+ for b.Controls[0].Op == OpS390XInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCGIJ:
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int64(x) == int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int64(x) < int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int64(x) > int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int64(x) == int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int64(x) < int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int64(x) > int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCGRJ:
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CGIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCIJ:
+ // match: (CIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int32(x) == int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int32(x) < int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int32(x) > int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int32(x) == int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int32(x) < int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int32(x) > int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLGIJ:
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint64(x) == uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint64(x) < uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint64(x) > uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint64(x) == uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint64(x) < uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint64(x) > uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCLGRJ:
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLGIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLIJ:
+ // match: (CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ // cond: int32(x) != 0
+ // result: (BRC {d} cmp yes no)
+ for b.Controls[0].Op == OpS390XLOCGR {
+ v_0 := b.Controls[0]
+ d := auxToS390xCCMask(v_0.Aux)
+ cmp := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0_1.AuxInt)
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater || !(int32(x) != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(d)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint32(x) == uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint32(x) < uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint32(x) > uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint32(x) == uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint32(x) < uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint32(x) > uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLRJ:
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCRJ:
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPWconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockS390XCLIJ, v0)
+ b.AuxInt = uint8ToAuxInt(0)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrGreater)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
new file mode 100644
index 0000000..c8ecefc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -0,0 +1,4905 @@
+// Code generated from gen/Wasm.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/internal/objabi"
+import "cmd/compile/internal/types"
+
+func rewriteValueWasm(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpWasmF64Abs
+ return true
+ case OpAdd16:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32F:
+ v.Op = OpWasmF32Add
+ return true
+ case OpAdd64:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd64F:
+ v.Op = OpWasmF64Add
+ return true
+ case OpAdd8:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddPtr:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddr:
+ return rewriteValueWasm_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd32:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd64:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd8:
+ v.Op = OpWasmI64And
+ return true
+ case OpAndB:
+ v.Op = OpWasmI64And
+ return true
+ case OpBitLen64:
+ return rewriteValueWasm_OpBitLen64(v)
+ case OpCeil:
+ v.Op = OpWasmF64Ceil
+ return true
+ case OpClosureCall:
+ v.Op = OpWasmLoweredClosureCall
+ return true
+ case OpCom16:
+ return rewriteValueWasm_OpCom16(v)
+ case OpCom32:
+ return rewriteValueWasm_OpCom32(v)
+ case OpCom64:
+ return rewriteValueWasm_OpCom64(v)
+ case OpCom8:
+ return rewriteValueWasm_OpCom8(v)
+ case OpCondSelect:
+ v.Op = OpWasmSelect
+ return true
+ case OpConst16:
+ return rewriteValueWasm_OpConst16(v)
+ case OpConst32:
+ return rewriteValueWasm_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpWasmF32Const
+ return true
+ case OpConst64:
+ v.Op = OpWasmI64Const
+ return true
+ case OpConst64F:
+ v.Op = OpWasmF64Const
+ return true
+ case OpConst8:
+ return rewriteValueWasm_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueWasm_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueWasm_OpConstNil(v)
+ case OpConvert:
+ v.Op = OpWasmLoweredConvert
+ return true
+ case OpCopysign:
+ v.Op = OpWasmF64Copysign
+ return true
+ case OpCtz16:
+ return rewriteValueWasm_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz32:
+ return rewriteValueWasm_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz8:
+ return rewriteValueWasm_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpWasmF64PromoteF32
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Uto32F:
+ return rewriteValueWasm_OpCvt32Uto32F(v)
+ case OpCvt32Uto64F:
+ return rewriteValueWasm_OpCvt32Uto64F(v)
+ case OpCvt32to32F:
+ return rewriteValueWasm_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValueWasm_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpWasmF32DemoteF64
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpWasmF32ConvertI64U
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpWasmF64ConvertI64U
+ return true
+ case OpCvt64to32F:
+ v.Op = OpWasmF32ConvertI64S
+ return true
+ case OpCvt64to64F:
+ v.Op = OpWasmF64ConvertI64S
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueWasm_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueWasm_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueWasm_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpWasmF32Div
+ return true
+ case OpDiv32u:
+ return rewriteValueWasm_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueWasm_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpWasmF64Div
+ return true
+ case OpDiv64u:
+ v.Op = OpWasmI64DivU
+ return true
+ case OpDiv8:
+ return rewriteValueWasm_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueWasm_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueWasm_OpEq16(v)
+ case OpEq32:
+ return rewriteValueWasm_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpWasmF32Eq
+ return true
+ case OpEq64:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEq64F:
+ v.Op = OpWasmF64Eq
+ return true
+ case OpEq8:
+ return rewriteValueWasm_OpEq8(v)
+ case OpEqB:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEqPtr:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpFloor:
+ v.Op = OpWasmF64Floor
+ return true
+ case OpGetCallerPC:
+ v.Op = OpWasmLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpWasmLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpWasmLoweredGetClosurePtr
+ return true
+ case OpInterCall:
+ v.Op = OpWasmLoweredInterCall
+ return true
+ case OpIsInBounds:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpIsNonNil:
+ return rewriteValueWasm_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq16:
+ return rewriteValueWasm_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueWasm_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueWasm_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpWasmF32Le
+ return true
+ case OpLeq32U:
+ return rewriteValueWasm_OpLeq32U(v)
+ case OpLeq64:
+ v.Op = OpWasmI64LeS
+ return true
+ case OpLeq64F:
+ v.Op = OpWasmF64Le
+ return true
+ case OpLeq64U:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq8:
+ return rewriteValueWasm_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueWasm_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueWasm_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueWasm_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueWasm_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpWasmF32Lt
+ return true
+ case OpLess32U:
+ return rewriteValueWasm_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpWasmI64LtS
+ return true
+ case OpLess64F:
+ v.Op = OpWasmF64Lt
+ return true
+ case OpLess64U:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpLess8:
+ return rewriteValueWasm_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueWasm_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueWasm_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueWasm_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueWasm_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueWasm_OpLsh16x32(v)
+ case OpLsh16x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh16x8:
+ return rewriteValueWasm_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueWasm_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueWasm_OpLsh32x32(v)
+ case OpLsh32x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh32x8:
+ return rewriteValueWasm_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueWasm_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueWasm_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueWasm_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueWasm_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueWasm_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueWasm_OpLsh8x32(v)
+ case OpLsh8x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh8x8:
+ return rewriteValueWasm_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueWasm_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueWasm_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueWasm_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueWasm_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueWasm_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpWasmI64RemU
+ return true
+ case OpMod8:
+ return rewriteValueWasm_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueWasm_OpMod8u(v)
+ case OpMove:
+ return rewriteValueWasm_OpMove(v)
+ case OpMul16:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32F:
+ v.Op = OpWasmF32Mul
+ return true
+ case OpMul64:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul64F:
+ v.Op = OpWasmF64Mul
+ return true
+ case OpMul8:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpNeg16:
+ return rewriteValueWasm_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueWasm_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpWasmF32Neg
+ return true
+ case OpNeg64:
+ return rewriteValueWasm_OpNeg64(v)
+ case OpNeg64F:
+ v.Op = OpWasmF64Neg
+ return true
+ case OpNeg8:
+ return rewriteValueWasm_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueWasm_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueWasm_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpWasmF32Ne
+ return true
+ case OpNeq64:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeq64F:
+ v.Op = OpWasmF64Ne
+ return true
+ case OpNeq8:
+ return rewriteValueWasm_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeqPtr:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNilCheck:
+ v.Op = OpWasmLoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpWasmI64Eqz
+ return true
+ case OpOffPtr:
+ v.Op = OpWasmI64AddConst
+ return true
+ case OpOr16:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr32:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr64:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr8:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOrB:
+ v.Op = OpWasmI64Or
+ return true
+ case OpPopCount16:
+ return rewriteValueWasm_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueWasm_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpWasmI64Popcnt
+ return true
+ case OpPopCount8:
+ return rewriteValueWasm_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueWasm_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpWasmI32Rotl
+ return true
+ case OpRotateLeft64:
+ v.Op = OpWasmI64Rotl
+ return true
+ case OpRotateLeft8:
+ return rewriteValueWasm_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ v.Op = OpWasmF64Nearest
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueWasm_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueWasm_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueWasm_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueWasm_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueWasm_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueWasm_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueWasm_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueWasm_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueWasm_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueWasm_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueWasm_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueWasm_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueWasm_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueWasm_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueWasm_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueWasm_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueWasm_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueWasm_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueWasm_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueWasm_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueWasm_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueWasm_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueWasm_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueWasm_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueWasm_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueWasm_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueWasm_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueWasm_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueWasm_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueWasm_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueWasm_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueWasm_OpRsh8x8(v)
+ case OpSignExt16to32:
+ return rewriteValueWasm_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValueWasm_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValueWasm_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValueWasm_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValueWasm_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValueWasm_OpSignExt8to64(v)
+ case OpSlicemask:
+ return rewriteValueWasm_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpWasmF64Sqrt
+ return true
+ case OpStaticCall:
+ v.Op = OpWasmLoweredStaticCall
+ return true
+ case OpStore:
+ return rewriteValueWasm_OpStore(v)
+ case OpSub16:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32F:
+ v.Op = OpWasmF32Sub
+ return true
+ case OpSub64:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub64F:
+ v.Op = OpWasmF64Sub
+ return true
+ case OpSub8:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSubPtr:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpTrunc:
+ v.Op = OpWasmF64Trunc
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpWasmLoweredWB
+ return true
+ case OpWasmF64Add:
+ return rewriteValueWasm_OpWasmF64Add(v)
+ case OpWasmF64Mul:
+ return rewriteValueWasm_OpWasmF64Mul(v)
+ case OpWasmI64Add:
+ return rewriteValueWasm_OpWasmI64Add(v)
+ case OpWasmI64AddConst:
+ return rewriteValueWasm_OpWasmI64AddConst(v)
+ case OpWasmI64And:
+ return rewriteValueWasm_OpWasmI64And(v)
+ case OpWasmI64Eq:
+ return rewriteValueWasm_OpWasmI64Eq(v)
+ case OpWasmI64Eqz:
+ return rewriteValueWasm_OpWasmI64Eqz(v)
+ case OpWasmI64LeU:
+ return rewriteValueWasm_OpWasmI64LeU(v)
+ case OpWasmI64Load:
+ return rewriteValueWasm_OpWasmI64Load(v)
+ case OpWasmI64Load16S:
+ return rewriteValueWasm_OpWasmI64Load16S(v)
+ case OpWasmI64Load16U:
+ return rewriteValueWasm_OpWasmI64Load16U(v)
+ case OpWasmI64Load32S:
+ return rewriteValueWasm_OpWasmI64Load32S(v)
+ case OpWasmI64Load32U:
+ return rewriteValueWasm_OpWasmI64Load32U(v)
+ case OpWasmI64Load8S:
+ return rewriteValueWasm_OpWasmI64Load8S(v)
+ case OpWasmI64Load8U:
+ return rewriteValueWasm_OpWasmI64Load8U(v)
+ case OpWasmI64LtU:
+ return rewriteValueWasm_OpWasmI64LtU(v)
+ case OpWasmI64Mul:
+ return rewriteValueWasm_OpWasmI64Mul(v)
+ case OpWasmI64Ne:
+ return rewriteValueWasm_OpWasmI64Ne(v)
+ case OpWasmI64Or:
+ return rewriteValueWasm_OpWasmI64Or(v)
+ case OpWasmI64Shl:
+ return rewriteValueWasm_OpWasmI64Shl(v)
+ case OpWasmI64ShrS:
+ return rewriteValueWasm_OpWasmI64ShrS(v)
+ case OpWasmI64ShrU:
+ return rewriteValueWasm_OpWasmI64ShrU(v)
+ case OpWasmI64Store:
+ return rewriteValueWasm_OpWasmI64Store(v)
+ case OpWasmI64Store16:
+ return rewriteValueWasm_OpWasmI64Store16(v)
+ case OpWasmI64Store32:
+ return rewriteValueWasm_OpWasmI64Store32(v)
+ case OpWasmI64Store8:
+ return rewriteValueWasm_OpWasmI64Store8(v)
+ case OpWasmI64Xor:
+ return rewriteValueWasm_OpWasmI64Xor(v)
+ case OpXor16:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor32:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor64:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor8:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpZero:
+ return rewriteValueWasm_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValueWasm_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValueWasm_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValueWasm_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValueWasm_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValueWasm_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValueWasm_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValueWasm_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LoweredAddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueWasm_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (I64Sub (I64Const [64]) (I64Clz x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst32(v *Value) bool {
+ // match: (Const32 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (I64Const [b2i(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(b2i(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (I64Const [0])
+ for {
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x10000])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100000000])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100000000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto32F x)
+ // result: (F32ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto64F x)
+ // result: (F64ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (F32ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (F64ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (I64DivS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [false] x y)
+ // result: (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (I64DivS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (I64Eqz (I64Eqz p))
+ for {
+ p := v_0
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 8
+ // result: (I64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && !t.IsSigned()
+ // result: (I64Load32U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && t.IsSigned()
+ // result: (I64Load32S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && !t.IsSigned()
+ // result: (I64Load16U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && t.IsSigned()
+ // result: (I64Load16S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && !t.IsSigned()
+ // result: (I64Load8U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && t.IsSigned()
+ // result: (I64Load8S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LoweredAddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpWasmLoweredAddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64Shl x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64Shl x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [false] x y)
+ // result: (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [false] x y)
+ // result: (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 [false] x y)
+ // result: (I64RemS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (I64Store8 dst (I64Load8U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (I64Store16 dst (I64Load16U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (I64Store32 dst (I64Load32U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (I64Store dst (I64Load src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (I64Store8 [2] dst (I64Load8U [2] src mem) (I64Store16 dst (I64Load16U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (I64Store8 [4] dst (I64Load8U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (I64Store16 [4] dst (I64Load16U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (I64Store32 [3] dst (I64Load32U [3] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s < 16
+ // result: (I64Store [s-8] dst (I64Load [s-8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s < 16) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(s - 8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 <= 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (I64Store dst (I64Load src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(src, mem)
+ v4 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%8 == 0 && logLargeCopy(v, s)
+ // result: (LoweredMove [s/8] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpWasmLoweredMove)
+ v.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg16 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg8 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (I64Popcnt (ZeroExt16to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (I64Popcnt (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (I64Popcnt (ZeroExt8to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (I64Const [c]))
+ // result: (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (I64Const [c]))
+ // result: (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrU x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrU x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrS x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrS x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64ShrS x (I64Const [63]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to32 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to32 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt16to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x:(I64Load32S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend32S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend32S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to16 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to32 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // cond: objabi.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(objabi.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Slicemask x)
+ // result: (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (I64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (I64Store32 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (I64Store16 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (I64Store8 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Add (F64Const [x]) (F64Const [y]))
+ // result: (F64Const [x + y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x + y)
+ return true
+ }
+ // match: (F64Add (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Add y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Mul (F64Const [x]) (F64Const [y]))
+ // cond: !math.IsNaN(x * y)
+ // result: (F64Const [x * y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.IsNaN(x * y)) {
+ break
+ }
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x * y)
+ return true
+ }
+ // match: (F64Mul (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Mul y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Add (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x + y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x + y)
+ return true
+ }
+ // match: (I64Add (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Add y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Add x (I64Const [y]))
+ // result: (I64AddConst [y] x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64AddConst)
+ v.AuxInt = int64ToAuxInt(y)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64AddConst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base))
+ // cond: isU32Bit(off+int64(off2))
+ // result: (LoweredAddr {sym} [int32(off)+off2] base)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ if !(isU32Bit(off + int64(off2))) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ // match: (I64AddConst [off] x:(SP))
+ // cond: isU32Bit(off)
+ // result: (LoweredAddr [int32(off)] x)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(isU32Bit(off)) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64And(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64And (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x & y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x & y)
+ return true
+ }
+ // match: (I64And (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64And y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Eq y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Eq x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eqz(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64Eqz (I64Eqz (I64Eqz x)))
+ // result: (I64Eqz x)
+ for {
+ if v_0.Op != OpWasmI64Eqz {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmI64Eqz {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LeU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LeU x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ // match: (I64LeU (I64Const [1]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load16S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load16U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load32S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load32U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read8(sym, off+int64(off2)))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, off+int64(off2))))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LtU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LtU (I64Const [0]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (I64LtU x (I64Const [1]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Mul (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x * y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x * y)
+ return true
+ }
+ // match: (I64Mul (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Mul y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Ne y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Ne x (I64Const [0]))
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Or (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x | y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x | y)
+ return true
+ }
+ // match: (I64Or (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Or y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Or)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Shl(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Shl (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x << uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x << uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrS (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x >> uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x >> uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrU (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [int64(uint64(x) >> uint64(y))])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(uint64(x) >> uint64(y)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store16(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store16 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store16 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store32 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store32 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store8 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store8 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Xor (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x ^ y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x ^ y)
+ return true
+ }
+ // match: (I64Xor (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Xor y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (I64Store8 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (I64Store16 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (I64Store32 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (I64Store destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (I64Store8 [2] destptr (I64Const [0]) (I64Store16 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (I64Store8 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (I64Store16 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (I64Store32 [3] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v1.AddArg3(destptr, v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // result: (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AddArg3(destptr, v0, mem)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // result: (I64Store [24] destptr (I64Const [0]) (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v3.AddArg3(destptr, v0, mem)
+ v2.AddArg3(destptr, v0, v3)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 == 0 && s > 32
+ // result: (LoweredZero [s/8] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 32) {
+ break
+ }
+ v.reset(OpWasmLoweredZero)
+ v.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to32 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to32 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to64 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x:(I64Load32U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt32to64 x)
+ // result: (I64And x (I64Const [0xffffffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to16 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to16 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to32 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to32 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to64 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteBlockWasm(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go
new file mode 100644
index 0000000..64d128b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite_test.go
@@ -0,0 +1,220 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// We generate memmove for copy(x[1:], x[:]); however, we may change it to OpMove
+// because the size is known. Check that OpMove is alias-safe, or that we did call memmove.
+func TestMove(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+			t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
+func TestMoveSmall(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+ t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
+func TestSubFlags(t *testing.T) {
+ if !subFlags32(0, 1).lt() {
+ t.Errorf("subFlags32(0,1).lt() returned false")
+ }
+ if !subFlags32(0, 1).ult() {
+ t.Errorf("subFlags32(0,1).ult() returned false")
+ }
+}
+
+func TestIsPPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ input int64
+ expected bool
+ }{
+ {0x00000001, true},
+ {0x80000001, true},
+ {0x80010001, false},
+ {0xFFFFFFFA, false},
+ {0xF0F0F0F0, false},
+ {0xFFFFFFFD, true},
+ {0x80000000, true},
+ {0x00000000, false},
+ {0xFFFFFFFF, true},
+ {0x0000FFFF, true},
+ {0xFF0000FF, true},
+ {0x00FFFF00, true},
+ }
+
+ for _, v := range tests {
+ if v.expected != isPPC64WordRotateMask(v.input) {
+ t.Errorf("isPPC64WordRotateMask(0x%x) failed", v.input)
+ }
+ }
+}
+
+func TestEncodeDecodePPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ rotate int64
+ mask uint64
+ nbits,
+ mb,
+ me,
+ encoded int64
+ }{
+ {1, 0x00000001, 32, 31, 31, 0x20011f20},
+ {2, 0x80000001, 32, 31, 0, 0x20021f01},
+ {3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e},
+ {4, 0x80000000, 32, 0, 0, 0x20040001},
+ {5, 0xFFFFFFFF, 32, 0, 31, 0x20050020},
+ {6, 0x0000FFFF, 32, 16, 31, 0x20061020},
+ {7, 0xFF0000FF, 32, 24, 7, 0x20071808},
+ {8, 0x00FFFF00, 32, 8, 23, 0x20080818},
+
+ {9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838},
+ {10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010},
+ {10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10},
+ }
+
+ for i, v := range tests {
+ result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits)
+ if result != v.encoded {
+ t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded)
+ }
+ rotate, mb, me, mask := DecodePPC64RotateMask(result)
+ if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask {
+ t.Errorf("DecodePPC64Failure(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiSrw(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000},
+ // ((x>>32)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiRlwinm(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ rlwinm int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x<<4)&0xFF00)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0},
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000},
+ // ((x>>4)&0xF000FFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64SldiSrw(t *testing.T) {
+ tests := []struct {
+ sld int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {4, 4, true, 0, 0xFFFFFFF0},
+ {4, 8, true, 28, 0x0FFFFFF0},
+ {0, 0, true, 0, 0xFFFFFFFF},
+ {8, 4, false, 0, 0},
+ {0, 32, false, 0, 0},
+ {0, 31, true, 1, 0x1},
+ {31, 31, true, 0, 0x80000000},
+ {32, 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64SldiSrw(v.sld, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64AndSrwi(t *testing.T) {
+ tests := []struct {
+ and int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {0x000000FF, 8, true, 24, 0xFF},
+ {0xF00000FF, 8, true, 24, 0xFF},
+ {0x0F0000FF, 4, false, 0, 0},
+ {0x00000000, 4, false, 0, 0},
+ {0xF0000000, 4, false, 0, 0},
+ {0xF0000000, 32, false, 0, 0},
+ {0xFFFFFFFF, 0, true, 0, 0xFFFFFFFF},
+ }
+ for i, v := range tests {
+ result := mergePPC64AndSrwi(v.and, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
new file mode 100644
index 0000000..e0fa976
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -0,0 +1,415 @@
+// Code generated from gen/dec.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec(v *Value) bool {
+ switch v.Op {
+ case OpComplexImag:
+ return rewriteValuedec_OpComplexImag(v)
+ case OpComplexReal:
+ return rewriteValuedec_OpComplexReal(v)
+ case OpIData:
+ return rewriteValuedec_OpIData(v)
+ case OpITab:
+ return rewriteValuedec_OpITab(v)
+ case OpLoad:
+ return rewriteValuedec_OpLoad(v)
+ case OpSliceCap:
+ return rewriteValuedec_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuedec_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuedec_OpSlicePtr(v)
+ case OpStore:
+ return rewriteValuedec_OpStore(v)
+ case OpStringLen:
+ return rewriteValuedec_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuedec_OpStringPtr(v)
+ }
+ return false
+}
+func rewriteValuedec_OpComplexImag(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ComplexImag (ComplexMake _ imag ))
+ // result: imag
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ imag := v_0.Args[1]
+ v.copyOf(imag)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpComplexReal(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ComplexReal (ComplexMake real _ ))
+ // result: real
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ real := v_0.Args[0]
+ v.copyOf(real)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpIData(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (IData (IMake _ data))
+ // result: data
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ data := v_0.Args[1]
+ v.copyOf(data)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ITab (IMake itab _))
+ // result: itab
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ itab := v_0.Args[0]
+ v.copyOf(itab)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: (ComplexMake (Load <typ.Float32> ptr mem) (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: (ComplexMake (Load <typ.Float64> ptr mem) (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v2.AuxInt = int64ToAuxInt(8)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsString()
+ // result: (StringMake (Load <typ.BytePtr> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsString()) {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsSlice()
+ // result: (SliceMake (Load <t.Elem().PtrTo()> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsSlice()) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v3 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v4.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v4.AddArg(ptr)
+ v3.AddArg2(v4, mem)
+ v.AddArg3(v0, v1, v3)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsInterface()
+ // result: (IMake (Load <typ.Uintptr> ptr mem) (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsInterface()) {
+ break
+ }
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceCap (SliceMake _ _ cap))
+ // result: cap
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ cap := v_0.Args[2]
+ v.copyOf(cap)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceLen (SliceMake _ len _))
+ // result: len
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtr (SliceMake ptr _ _ ))
+ // result: ptr
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 8
+ // result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float32)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 16
+ // result: (Store {typ.Float64} (OffPtr <typ.Float64Ptr> [8] dst) imag (Store {typ.Float64} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 16) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float64)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float64)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store dst (StringMake ptr len) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStringMake {
+ break
+ }
+ len := v_1.Args[1]
+ ptr := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.BytePtr)
+ v1.AddArg3(dst, ptr, mem)
+ v.AddArg3(v0, len, v1)
+ return true
+ }
+ // match: (Store {t} dst (SliceMake ptr len cap) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem)))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpSliceMake {
+ break
+ }
+ cap := v_1.Args[2]
+ ptr := v_1.Args[0]
+ len := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.Elem().PtrTo())
+ v3.AddArg3(dst, ptr, mem)
+ v1.AddArg3(v2, len, v3)
+ v.AddArg3(v0, cap, v1)
+ return true
+ }
+ // match: (Store dst (IMake itab data) mem)
+ // result: (Store {typ.BytePtr} (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpIMake {
+ break
+ }
+ data := v_1.Args[1]
+ itab := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Uintptr)
+ v1.AddArg3(dst, itab, mem)
+ v.AddArg3(v0, data, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringLen (StringMake _ len))
+ // result: len
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringPtr (StringMake ptr _))
+ // result: ptr
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteBlockdec(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
new file mode 100644
index 0000000..c49bc80
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -0,0 +1,2464 @@
+// Code generated from gen/dec64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec64(v *Value) bool {
+ switch v.Op {
+ case OpAdd64:
+ return rewriteValuedec64_OpAdd64(v)
+ case OpAnd64:
+ return rewriteValuedec64_OpAnd64(v)
+ case OpArg:
+ return rewriteValuedec64_OpArg(v)
+ case OpBitLen64:
+ return rewriteValuedec64_OpBitLen64(v)
+ case OpBswap64:
+ return rewriteValuedec64_OpBswap64(v)
+ case OpCom64:
+ return rewriteValuedec64_OpCom64(v)
+ case OpConst64:
+ return rewriteValuedec64_OpConst64(v)
+ case OpCtz64:
+ return rewriteValuedec64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpEq64:
+ return rewriteValuedec64_OpEq64(v)
+ case OpInt64Hi:
+ return rewriteValuedec64_OpInt64Hi(v)
+ case OpInt64Lo:
+ return rewriteValuedec64_OpInt64Lo(v)
+ case OpLeq64:
+ return rewriteValuedec64_OpLeq64(v)
+ case OpLeq64U:
+ return rewriteValuedec64_OpLeq64U(v)
+ case OpLess64:
+ return rewriteValuedec64_OpLess64(v)
+ case OpLess64U:
+ return rewriteValuedec64_OpLess64U(v)
+ case OpLoad:
+ return rewriteValuedec64_OpLoad(v)
+ case OpLsh16x64:
+ return rewriteValuedec64_OpLsh16x64(v)
+ case OpLsh32x64:
+ return rewriteValuedec64_OpLsh32x64(v)
+ case OpLsh64x16:
+ return rewriteValuedec64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuedec64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuedec64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuedec64_OpLsh64x8(v)
+ case OpLsh8x64:
+ return rewriteValuedec64_OpLsh8x64(v)
+ case OpMul64:
+ return rewriteValuedec64_OpMul64(v)
+ case OpNeg64:
+ return rewriteValuedec64_OpNeg64(v)
+ case OpNeq64:
+ return rewriteValuedec64_OpNeq64(v)
+ case OpOr32:
+ return rewriteValuedec64_OpOr32(v)
+ case OpOr64:
+ return rewriteValuedec64_OpOr64(v)
+ case OpRsh16Ux64:
+ return rewriteValuedec64_OpRsh16Ux64(v)
+ case OpRsh16x64:
+ return rewriteValuedec64_OpRsh16x64(v)
+ case OpRsh32Ux64:
+ return rewriteValuedec64_OpRsh32Ux64(v)
+ case OpRsh32x64:
+ return rewriteValuedec64_OpRsh32x64(v)
+ case OpRsh64Ux16:
+ return rewriteValuedec64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuedec64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuedec64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuedec64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuedec64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuedec64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuedec64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuedec64_OpRsh64x8(v)
+ case OpRsh8Ux64:
+ return rewriteValuedec64_OpRsh8Ux64(v)
+ case OpRsh8x64:
+ return rewriteValuedec64_OpRsh8x64(v)
+ case OpSignExt16to64:
+ return rewriteValuedec64_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuedec64_OpSignExt32to64(v)
+ case OpSignExt8to64:
+ return rewriteValuedec64_OpSignExt8to64(v)
+ case OpStore:
+ return rewriteValuedec64_OpStore(v)
+ case OpSub64:
+ return rewriteValuedec64_OpSub64(v)
+ case OpTrunc64to16:
+ return rewriteValuedec64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuedec64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuedec64_OpTrunc64to8(v)
+ case OpXor64:
+ return rewriteValuedec64_OpXor64(v)
+ case OpZeroExt16to64:
+ return rewriteValuedec64_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuedec64_OpZeroExt32to64(v)
+ case OpZeroExt8to64:
+ return rewriteValuedec64_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuedec64_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64 x y)
+ // result: (Int64Make (Add32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (And64 x y)
+ // result: (Int64Make (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpArg(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (Add32 <typ.Int> (BitLen32 <typ.Int> (Int64Hi x)) (BitLen32 <typ.Int> (Or32 <typ.UInt32> (Int64Lo x) (Zeromask (Int64Hi x)))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.Int
+ v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v5.AddArg(v1)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpBswap64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap64 x)
+ // result: (Int64Make (Bswap32 <typ.UInt32> (Int64Lo x)) (Bswap32 <typ.UInt32> (Int64Hi x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (Int64Make (Com32 <typ.UInt32> (Int64Hi x)) (Com32 <typ.UInt32> (Int64Lo x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpConst64(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64 <t> [c])
+ // cond: t.IsSigned()
+ // result: (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Const64 <t> [c])
+ // cond: !t.IsSigned()
+ // result: (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(!t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // result: (Add32 <typ.UInt32> (Ctz32 <typ.UInt32> (Int64Lo x)) (And32 <typ.UInt32> (Com32 <typ.UInt32> (Zeromask (Int64Lo x))) (Ctz32 <typ.UInt32> (Int64Hi x))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v4.AddArg(v1)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v5.AddArg(v6)
+ v2.AddArg2(v3, v5)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAndB)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpInt64Hi(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Hi (Int64Make hi _))
+ // result: hi
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ v.copyOf(hi)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpInt64Lo(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Lo (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v7 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v7.AuxInt = int16ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v7 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v7.AuxInt = int8ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Int64Make (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y)) (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y)) (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32))
+ v9.AddArg2(v2, v7)
+ v8.AddArg(v9)
+ v4.AddArg2(v5, v8)
+ v0.AddArg2(v1, v4)
+ v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v10.AddArg(v9)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 <t> x)
+ // result: (Sub64 (Const64 <t> [0]) x)
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c == 0
+ // result: y
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ y := v_1
+ if !(c == 0) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c != 0
+ // result: (Const32 <typ.UInt32> [-1])
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if !(c != 0) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.Type = typ.UInt32
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuedec64_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or64 x y)
+ // result: (Int64Make (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt16to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask x)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x s)
+ // result: (Int64Make (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x s)
+ // result: (Int64Make (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x s)
+ // result: (Int64Make (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x s)
+ // result: (Int64Make (Rsh32x16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x s)
+ // result: (Int64Make (Rsh32x32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v14.AuxInt = int32ToAuxInt(5)
+ v13.AddArg2(s, v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x s)
+ // result: (Int64Make (Rsh32x8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt8to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x)
+ // result: (SignExt32to64 (SignExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x)
+ // result: (Int64Make (Signmask x) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x)
+ // result: (SignExt32to64 (SignExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && !config.BigEndian
+ // result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && !config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(hi.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(lo.Type)
+ v1.AddArg3(dst, lo, mem)
+ v.AddArg3(v0, hi, v1)
+ return true
+ }
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && config.BigEndian
+ // result: (Store {lo.Type} (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store {hi.Type} dst hi mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(lo.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(hi.Type)
+ v1.AddArg3(dst, hi, mem)
+ v.AddArg3(v0, lo, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64 x y)
+ // result: (Int64Make (Sub32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to16 (Int64Make _ lo))
+ // result: (Trunc32to16 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to16)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (Trunc32to16 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (Int64Lo x)
+ for {
+ x := v_0
+ v.reset(OpInt64Lo)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to8 (Int64Make _ lo))
+ // result: (Trunc32to8 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to8)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (Trunc32to8 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Xor64 x y)
+ // result: (Int64Make (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x)
+ // result: (ZeroExt32to64 (ZeroExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x)
+ // result: (Int64Make (Const32 <typ.UInt32> [0]) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x)
+ // result: (ZeroExt32to64 (ZeroExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockdec64(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go
new file mode 100644
index 0000000..23ff417
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedecArgs.go
@@ -0,0 +1,247 @@
+// Code generated from gen/decArgs.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValuedecArgs(v *Value) bool {
+ switch v.Op {
+ case OpArg:
+ return rewriteValuedecArgs_OpArg(v)
+ }
+ return false
+}
+func rewriteValuedecArgs_OpArg(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ typ := &b.Func.Config.Types
+ // match: (Arg {n} [off])
+ // cond: v.Type.IsString()
+ // result: (StringMake (Arg <typ.BytePtr> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(v.Type.IsString()) {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
+ v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: v.Type.IsSlice()
+ // result: (SliceMake (Arg <v.Type.Elem().PtrTo()> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]) (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(v.Type.IsSlice()) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo())
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
+ v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
+ v1.Aux = symToAux(n)
+ v2 := b.NewValue0(v.Pos, OpArg, typ.Int)
+ v2.AuxInt = int32ToAuxInt(off + 2*int32(config.PtrSize))
+ v2.Aux = symToAux(n)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: v.Type.IsInterface()
+ // result: (IMake (Arg <typ.Uintptr> {n} [off]) (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(v.Type.IsInterface()) {
+ break
+ }
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
+ v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: v.Type.IsComplex() && v.Type.Size() == 16
+ // result: (ComplexMake (Arg <typ.Float64> {n} [off]) (Arg <typ.Float64> {n} [off+8]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(v.Type.IsComplex() && v.Type.Size() == 16) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Float64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Float64)
+ v1.AuxInt = int32ToAuxInt(off + 8)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: v.Type.IsComplex() && v.Type.Size() == 8
+ // result: (ComplexMake (Arg <typ.Float32> {n} [off]) (Arg <typ.Float32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(v.Type.IsComplex() && v.Type.Size() == 8) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Float32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.Float32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg <t>)
+ // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
+ // result: (StructMake0)
+ for {
+ t := v.Type
+ if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake0)
+ return true
+ }
+ // match: (Arg <t> {n} [off])
+ // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
+ // result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake1)
+ v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
+ v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
+ v0.Aux = symToAux(n)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Arg <t> {n} [off])
+ // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
+ // result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake2)
+ v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
+ v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
+ v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg <t> {n} [off])
+ // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
+ // result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake3)
+ v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
+ v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
+ v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
+ v1.Aux = symToAux(n)
+ v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
+ v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
+ v2.Aux = symToAux(n)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Arg <t> {n} [off])
+ // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
+ // result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]) (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake4)
+ v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
+ v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
+ v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
+ v1.Aux = symToAux(n)
+ v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
+ v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
+ v2.Aux = symToAux(n)
+ v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3))
+ v3.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(3)))
+ v3.Aux = symToAux(n)
+ v.AddArg4(v0, v1, v2, v3)
+ return true
+ }
+ // match: (Arg <t>)
+ // cond: t.IsArray() && t.NumElem() == 0
+ // result: (ArrayMake0)
+ for {
+ t := v.Type
+ if !(t.IsArray() && t.NumElem() == 0) {
+ break
+ }
+ v.reset(OpArrayMake0)
+ return true
+ }
+ // match: (Arg <t> {n} [off])
+ // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
+ // result: (ArrayMake1 (Arg <t.Elem()> {n} [off]))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpArrayMake1)
+ v0 := b.NewValue0(v.Pos, OpArg, t.Elem())
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteBlockdecArgs(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
new file mode 100644
index 0000000..958e24d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -0,0 +1,25091 @@
+// Code generated from gen/generic.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValuegeneric(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValuegeneric_OpAdd16(v)
+ case OpAdd32:
+ return rewriteValuegeneric_OpAdd32(v)
+ case OpAdd32F:
+ return rewriteValuegeneric_OpAdd32F(v)
+ case OpAdd64:
+ return rewriteValuegeneric_OpAdd64(v)
+ case OpAdd64F:
+ return rewriteValuegeneric_OpAdd64F(v)
+ case OpAdd8:
+ return rewriteValuegeneric_OpAdd8(v)
+ case OpAddPtr:
+ return rewriteValuegeneric_OpAddPtr(v)
+ case OpAnd16:
+ return rewriteValuegeneric_OpAnd16(v)
+ case OpAnd32:
+ return rewriteValuegeneric_OpAnd32(v)
+ case OpAnd64:
+ return rewriteValuegeneric_OpAnd64(v)
+ case OpAnd8:
+ return rewriteValuegeneric_OpAnd8(v)
+ case OpAndB:
+ return rewriteValuegeneric_OpAndB(v)
+ case OpArraySelect:
+ return rewriteValuegeneric_OpArraySelect(v)
+ case OpCom16:
+ return rewriteValuegeneric_OpCom16(v)
+ case OpCom32:
+ return rewriteValuegeneric_OpCom32(v)
+ case OpCom64:
+ return rewriteValuegeneric_OpCom64(v)
+ case OpCom8:
+ return rewriteValuegeneric_OpCom8(v)
+ case OpConstInterface:
+ return rewriteValuegeneric_OpConstInterface(v)
+ case OpConstSlice:
+ return rewriteValuegeneric_OpConstSlice(v)
+ case OpConstString:
+ return rewriteValuegeneric_OpConstString(v)
+ case OpConvert:
+ return rewriteValuegeneric_OpConvert(v)
+ case OpCtz16:
+ return rewriteValuegeneric_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuegeneric_OpCtz32(v)
+ case OpCtz64:
+ return rewriteValuegeneric_OpCtz64(v)
+ case OpCtz8:
+ return rewriteValuegeneric_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuegeneric_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuegeneric_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ return rewriteValuegeneric_OpCvt32Fto64F(v)
+ case OpCvt32to32F:
+ return rewriteValuegeneric_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuegeneric_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuegeneric_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ return rewriteValuegeneric_OpCvt64Fto32F(v)
+ case OpCvt64Fto64:
+ return rewriteValuegeneric_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuegeneric_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuegeneric_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ return rewriteValuegeneric_OpCvtBoolToUint8(v)
+ case OpDiv16:
+ return rewriteValuegeneric_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuegeneric_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuegeneric_OpDiv32(v)
+ case OpDiv32F:
+ return rewriteValuegeneric_OpDiv32F(v)
+ case OpDiv32u:
+ return rewriteValuegeneric_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValuegeneric_OpDiv64(v)
+ case OpDiv64F:
+ return rewriteValuegeneric_OpDiv64F(v)
+ case OpDiv64u:
+ return rewriteValuegeneric_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValuegeneric_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuegeneric_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuegeneric_OpEq16(v)
+ case OpEq32:
+ return rewriteValuegeneric_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuegeneric_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuegeneric_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuegeneric_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuegeneric_OpEq8(v)
+ case OpEqB:
+ return rewriteValuegeneric_OpEqB(v)
+ case OpEqInter:
+ return rewriteValuegeneric_OpEqInter(v)
+ case OpEqPtr:
+ return rewriteValuegeneric_OpEqPtr(v)
+ case OpEqSlice:
+ return rewriteValuegeneric_OpEqSlice(v)
+ case OpIMake:
+ return rewriteValuegeneric_OpIMake(v)
+ case OpInterCall:
+ return rewriteValuegeneric_OpInterCall(v)
+ case OpInterLECall:
+ return rewriteValuegeneric_OpInterLECall(v)
+ case OpIsInBounds:
+ return rewriteValuegeneric_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuegeneric_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuegeneric_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuegeneric_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuegeneric_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuegeneric_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuegeneric_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuegeneric_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuegeneric_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuegeneric_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuegeneric_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuegeneric_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuegeneric_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuegeneric_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuegeneric_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuegeneric_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuegeneric_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuegeneric_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuegeneric_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuegeneric_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuegeneric_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuegeneric_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuegeneric_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuegeneric_OpLoad(v)
+ case OpLsh16x16:
+ return rewriteValuegeneric_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuegeneric_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuegeneric_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuegeneric_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuegeneric_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuegeneric_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuegeneric_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuegeneric_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuegeneric_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuegeneric_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuegeneric_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuegeneric_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuegeneric_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuegeneric_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuegeneric_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuegeneric_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuegeneric_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuegeneric_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuegeneric_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuegeneric_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuegeneric_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuegeneric_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuegeneric_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuegeneric_OpMod8u(v)
+ case OpMove:
+ return rewriteValuegeneric_OpMove(v)
+ case OpMul16:
+ return rewriteValuegeneric_OpMul16(v)
+ case OpMul32:
+ return rewriteValuegeneric_OpMul32(v)
+ case OpMul32F:
+ return rewriteValuegeneric_OpMul32F(v)
+ case OpMul64:
+ return rewriteValuegeneric_OpMul64(v)
+ case OpMul64F:
+ return rewriteValuegeneric_OpMul64F(v)
+ case OpMul8:
+ return rewriteValuegeneric_OpMul8(v)
+ case OpNeg16:
+ return rewriteValuegeneric_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValuegeneric_OpNeg32(v)
+ case OpNeg32F:
+ return rewriteValuegeneric_OpNeg32F(v)
+ case OpNeg64:
+ return rewriteValuegeneric_OpNeg64(v)
+ case OpNeg64F:
+ return rewriteValuegeneric_OpNeg64F(v)
+ case OpNeg8:
+ return rewriteValuegeneric_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValuegeneric_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuegeneric_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuegeneric_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuegeneric_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuegeneric_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuegeneric_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValuegeneric_OpNeqB(v)
+ case OpNeqInter:
+ return rewriteValuegeneric_OpNeqInter(v)
+ case OpNeqPtr:
+ return rewriteValuegeneric_OpNeqPtr(v)
+ case OpNeqSlice:
+ return rewriteValuegeneric_OpNeqSlice(v)
+ case OpNilCheck:
+ return rewriteValuegeneric_OpNilCheck(v)
+ case OpNot:
+ return rewriteValuegeneric_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuegeneric_OpOffPtr(v)
+ case OpOr16:
+ return rewriteValuegeneric_OpOr16(v)
+ case OpOr32:
+ return rewriteValuegeneric_OpOr32(v)
+ case OpOr64:
+ return rewriteValuegeneric_OpOr64(v)
+ case OpOr8:
+ return rewriteValuegeneric_OpOr8(v)
+ case OpOrB:
+ return rewriteValuegeneric_OpOrB(v)
+ case OpPhi:
+ return rewriteValuegeneric_OpPhi(v)
+ case OpPtrIndex:
+ return rewriteValuegeneric_OpPtrIndex(v)
+ case OpRotateLeft16:
+ return rewriteValuegeneric_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuegeneric_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuegeneric_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuegeneric_OpRotateLeft8(v)
+ case OpRound32F:
+ return rewriteValuegeneric_OpRound32F(v)
+ case OpRound64F:
+ return rewriteValuegeneric_OpRound64F(v)
+ case OpRsh16Ux16:
+ return rewriteValuegeneric_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuegeneric_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuegeneric_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuegeneric_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuegeneric_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuegeneric_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuegeneric_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuegeneric_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuegeneric_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuegeneric_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuegeneric_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuegeneric_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuegeneric_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuegeneric_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuegeneric_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuegeneric_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuegeneric_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuegeneric_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuegeneric_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuegeneric_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuegeneric_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuegeneric_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuegeneric_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuegeneric_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuegeneric_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuegeneric_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuegeneric_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuegeneric_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuegeneric_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuegeneric_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuegeneric_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuegeneric_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValuegeneric_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValuegeneric_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValuegeneric_OpSelectN(v)
+ case OpSignExt16to32:
+ return rewriteValuegeneric_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValuegeneric_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuegeneric_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValuegeneric_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValuegeneric_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValuegeneric_OpSignExt8to64(v)
+ case OpSliceCap:
+ return rewriteValuegeneric_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuegeneric_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuegeneric_OpSlicePtr(v)
+ case OpSlicemask:
+ return rewriteValuegeneric_OpSlicemask(v)
+ case OpSqrt:
+ return rewriteValuegeneric_OpSqrt(v)
+ case OpStaticCall:
+ return rewriteValuegeneric_OpStaticCall(v)
+ case OpStaticLECall:
+ return rewriteValuegeneric_OpStaticLECall(v)
+ case OpStore:
+ return rewriteValuegeneric_OpStore(v)
+ case OpStringLen:
+ return rewriteValuegeneric_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuegeneric_OpStringPtr(v)
+ case OpStructSelect:
+ return rewriteValuegeneric_OpStructSelect(v)
+ case OpSub16:
+ return rewriteValuegeneric_OpSub16(v)
+ case OpSub32:
+ return rewriteValuegeneric_OpSub32(v)
+ case OpSub32F:
+ return rewriteValuegeneric_OpSub32F(v)
+ case OpSub64:
+ return rewriteValuegeneric_OpSub64(v)
+ case OpSub64F:
+ return rewriteValuegeneric_OpSub64F(v)
+ case OpSub8:
+ return rewriteValuegeneric_OpSub8(v)
+ case OpTrunc16to8:
+ return rewriteValuegeneric_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuegeneric_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuegeneric_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuegeneric_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuegeneric_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuegeneric_OpTrunc64to8(v)
+ case OpXor16:
+ return rewriteValuegeneric_OpXor16(v)
+ case OpXor32:
+ return rewriteValuegeneric_OpXor32(v)
+ case OpXor64:
+ return rewriteValuegeneric_OpXor64(v)
+ case OpXor8:
+ return rewriteValuegeneric_OpXor8(v)
+ case OpZero:
+ return rewriteValuegeneric_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValuegeneric_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValuegeneric_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuegeneric_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValuegeneric_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValuegeneric_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValuegeneric_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Add16 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Const16 [1]) (Com16 x))
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 || v_1.Op != OpCom16 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Add16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Add16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub16 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Add32 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Const32 [1]) (Com32 x))
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 || v_1.Op != OpCom32 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Add32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Add32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub32 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add32F (Const32F [c]) (Const32F [d]))
+ // cond: c+d == c+d
+ // result: (Const32F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Add64 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Const64 [1]) (Com64 x))
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpCom64 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Add64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Add64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub64 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add64F (Const64F [c]) (Const64F [d]))
+ // cond: c+d == c+d
+ // result: (Const64F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Add8 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Const8 [1]) (Com8 x))
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 || v_1.Op != OpCom8 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Add8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Add8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub8 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAddPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AddPtr <t> x (Const64 [c]))
+ // result: (OffPtr <t> x [c])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (AddPtr <t> x (Const32 [c]))
+ // result: (OffPtr <t> x [int64(c)])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c >= int64(16-ntz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-ntz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c])))
+ // cond: c >= int64(16-nlz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-nlz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And16 (Const16 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x (And16 x y))
+ // result: (And16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (And16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (And16 i (And16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x))
+ // result: (And16 (Const16 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c >= int64(32-ntz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-ntz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c])))
+ // cond: c >= int64(32-nlz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-nlz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And32 (Const32 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x (And32 x y))
+ // result: (And32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (And32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (And32 i (And32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x))
+ // result: (And32 (Const32 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c >= int64(64-ntz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-ntz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c])))
+ // cond: c >= int64(64-nlz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-nlz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And64 (Const64 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x (And64 x y))
+ // result: (And64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (And64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (And64 i (And64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x))
+ // result: (And64 (Const64 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c >= int64(8-ntz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-ntz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c])))
+ // cond: c >= int64(8-nlz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-nlz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And8 (Const8 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x (And8 x y))
+ // result: (And8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (And8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (And8 i (And8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x))
+ // result: (And8 (Const8 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAndB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ return false
+}
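The AndB rules above fuse a two-sided signed range check into a single unsigned comparison. A sketch of the identity they rely on (illustrative only, not part of the generated file; inHalfOpenRange is a hypothetical helper): for constants with d >= c, "c <= x && x < d" is equivalent to "uint64(x-c) < uint64(d-c)", because subtracting c maps [c, d) onto [0, d-c) and maps every other value to something at least d-c modulo 2^64.

	// Hypothetical helper illustrating the identity the rewrite relies on.
	func inHalfOpenRange(x, c, d int64) bool { // assumes d >= c
		return uint64(x-c) < uint64(d-c) // same result as c <= x && x < d
	}

The Less/Leq variants differ only in whether each bound is inclusive, which is why the later conditions carry the c+1 adjustment together with its overflow guard (c+1 > c).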
+func rewriteValuegeneric_OpArraySelect(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ArraySelect (ArrayMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpArrayMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 (Com16 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com16 (Const16 [c]))
+ // result: (Const16 [^c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(^c)
+ return true
+ }
+ // match: (Com16 (Add16 (Const16 [-1]) x))
+ // result: (Neg16 x)
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 || auxIntToInt16(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 (Com32 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com32 (Const32 [c]))
+ // result: (Const32 [^c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (Com32 (Add32 (Const32 [-1]) x))
+ // result: (Neg32 x)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 (Com64 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com64 (Const64 [c]))
+ // result: (Const64 [^c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (Com64 (Add64 (Const64 [-1]) x))
+ // result: (Neg64 x)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 (Com8 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com8 (Const8 [c]))
+ // result: (Const8 [^c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(^c)
+ return true
+ }
+ // match: (Com8 (Add8 (Const8 [-1]) x))
+ // result: (Neg8 x)
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 || auxIntToInt8(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
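The Com* rules above encode two's-complement identities: a double complement is the identity, complementing a constant folds directly, and, since ^x == -x-1, the complement of x-1 (spelled as Add with a -1 constant in the rules) is the negation of x. A minimal sketch of that last identity (illustrative only, not generated code):

	// ^(x-1) equals -x for every int16 value, including the wraparound cases.
	func comOfDecrement(x int16) int16 {
		return ^(x - 1)
	}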
+func rewriteValuegeneric_OpConstInterface(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ConstInterface)
+ // result: (IMake (ConstNil <typ.Uintptr>) (ConstNil <typ.BytePtr>))
+ for {
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr)
+ v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpConstSlice(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 4
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const32 <typ.Int> [0]) (Const32 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 8
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const64 <typ.Int> [0]) (Const64 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConstString(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ typ := &b.Func.Config.Types
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str == ""
+ // result: (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str == ""
+ // result: (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const32 <typ.Int> [int32(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v2.AuxInt = int32ToAuxInt(int32(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const64 <typ.Int> [int64(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v2.AuxInt = int64ToAuxInt(int64(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConvert(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Convert (Add64 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Add32 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Convert ptr mem) mem)
+ // result: ptr
+ for {
+ if v_0.Op != OpConvert {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz16(c)))
+ return true
+ }
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz32(c)))
+ return true
+ }
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz64(c)))
+ return true
+ }
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz8(c)))
+ return true
+ }
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz8(c)))
+ return true
+ }
+ return false
+}
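The Ctz* rules fold a count-trailing-zeros of a constant at compile time; the PtrSize condition only selects the width of the folded constant so it matches the op's result type on 32- and 64-bit targets. A sketch of the arithmetic involved, under the assumption that the ntz helpers mirror math/bits:

	import "math/bits"

	// Trailing-zero count of a 16-bit constant, as ntz16 presumably computes it.
	func foldCtz16(c uint16) int {
		return bits.TrailingZeros16(c) // 16 when c == 0
	}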
+func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto32 (Const32F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64 (Const32F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64F (Const32F [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to32F (Const32 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to64F (Const32 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32 (Const64F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32F (Const64F [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto64 (Const64F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to32F (Const64 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to64F (Const64 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CvtBoolToUint8 (ConstBool [false]))
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (CvtBoolToUint8 (ConstBool [true]))
+ // result: (Const8 [1])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c/d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div16 n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpDiv16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [-1<<15]))
+ // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != -1<<15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))]))) (Const64 <typ.UInt64> [int64(log16(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh16x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(15)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(16 - log16(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log16(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [c]))
+ // cond: smagicOK16(c)
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic16(c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(smagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
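The power-of-two Div16 rule above implements signed division that truncates toward zero: n>>15 is 0 for non-negative n and all ones otherwise, and logically shifting that by 16-log2(c) yields a bias of either 0 or c-1, which is added to n before the final arithmetic shift. A compact sketch under the assumption c = 1<<log (a hypothetical helper, not the generated code):

	// Divide n by 1<<log with truncation toward zero, matching Go's n / c.
	func div16ByPow2(n int16, log uint) int16 {
		bias := int16(uint16(n>>15) >> (16 - log)) // 0 if n >= 0, c-1 if n < 0
		return (n + bias) >> log
	}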
+func rewriteValuegeneric_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c)/uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) / uint16(d)))
+ return true
+ }
+ // match: (Div16u n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 8
+ // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic16(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpTrunc64to16)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
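The Div16u magic-number rules replace division by a constant c with a widening multiply and a shift. For the first (RegSize == 8) form to be correct, umagic16(c) must supply m and s such that, for every uint16 x, (uint64(x)*(1<<16+m))>>(16+s) equals uint64(x)/uint64(c); the remaining forms are 32-bit-register variants of the same idea. A sketch of the shape of this strength reduction (m and s are placeholders for the compiler's magic values, not real ones):

	// Division by a fixed c rewritten as multiply-and-shift; m and s would come
	// from the magic-number computation for that particular c.
	func divByConst(x uint16, m uint64, s uint) uint16 {
		return uint16((uint64(x) * (1<<16 + m)) >> (16 + s))
	}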
+func rewriteValuegeneric_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c/d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32 n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpDiv32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [-1<<31]))
+ // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != -1<<31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))]))) (Const64 <typ.UInt64> [int64(log32(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(31)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(32 - log32(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log32(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 8
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic32(c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m/2)]) x) (Const64 <typ.UInt64> [smagic32(c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(31)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m)]) x) x) (Const64 <typ.UInt64> [smagic32(c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd32, t)
+ v2 := b.NewValue0(v.Pos, OpHmul32, t)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div32F (Const32F [c]) (Const32F [d]))
+ // cond: c/d == c/d
+ // result: (Const32F [c/d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32F x (Const32F <t> [c]))
+ // cond: reciprocalExact32(c)
+ // result: (Mul32F x (Const32F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat32(v_1.AuxInt)
+ if !(reciprocalExact32(c)) {
+ break
+ }
+ v.reset(OpMul32F)
+ v0 := b.NewValue0(v.Pos, OpConst32F, t)
+ v0.AuxInt = float32ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ // match: (Div32u n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)]) x) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic32(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic32(c).m)]) x)) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
+ v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c/d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64 n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: (Const64 [0])
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpDiv64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))]))) (Const64 <typ.UInt64> [int64(log64(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(64 - log64(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log64(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m/2)]) x) (Const64 <typ.UInt64> [smagic64(c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m)]) x) x) (Const64 <typ.UInt64> [smagic64(c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd64, t)
+ v2 := b.NewValue0(v.Pos, OpHmul64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic64(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div64F (Const64F [c]) (Const64F [d]))
+ // cond: c/d == c/d
+ // result: (Const64F [c/d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64F x (Const64F <t> [c]))
+ // cond: reciprocalExact64(c)
+ // result: (Mul64F x (Const64F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(reciprocalExact64(c)) {
+ break
+ }
+ v.reset(OpMul64F)
+ v0 := b.NewValue0(v.Pos, OpConst64F, t)
+ v0.AuxInt = float64ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ // match: (Div64u n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u n (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul
+ // result: (Add64 (Add64 <typ.UInt64> (Add64 <typ.UInt64> (Lsh64x64 <typ.UInt64> (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [32])) (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])))) (Mul64 <typ.UInt64> (ZeroExt32to64 <typ.UInt64> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u <typ.UInt32> (Add32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])) (Mul32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)])) (Const32 <typ.UInt32> [int32((1<<32)%c)]))) (Const32 <typ.UInt32> [int32(c)]))))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32)
+ v6.AddArg2(x, v7)
+ v5.AddArg(v6)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(int32(c))
+ v4.AddArg2(v5, v8)
+ v3.AddArg(v4)
+ v2.AddArg2(v3, v7)
+ v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v11.AddArg(x)
+ v10.AddArg2(v11, v8)
+ v9.AddArg(v10)
+ v1.AddArg2(v2, v9)
+ v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v14.AddArg2(v5, v8)
+ v13.AddArg(v14)
+ v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c))
+ v12.AddArg2(v13, v15)
+ v0.AddArg2(v1, v12)
+ v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v19.AddArg2(v11, v8)
+ v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c))
+ v20.AddArg2(v14, v21)
+ v18.AddArg2(v19, v20)
+ v17.AddArg2(v18, v8)
+ v16.AddArg(v17)
+ v.AddArg2(v0, v16)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic64(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic64(c).m)]) x)) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c/d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div8 n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpDiv8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [-1<<7]))
+ // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != -1<<7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [7])) (Const64 <typ.UInt64> [int64(8-log8(c))]))) (Const64 <typ.UInt64> [int64(log8(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh8x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(7)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(8 - log8(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log8(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [c]))
+ // cond: smagicOK8(c)
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic8(c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(smagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c)/uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) / uint8(d)))
+ return true
+ }
+ // match: (Div8u n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8u x (Const8 [c]))
+ // cond: umagicOK8(c)
+ // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic8(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(umagicOK8(c)) {
+ break
+ }
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Eq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(sdivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(sdivisible16(c).a)]) ) (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(sdivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Eq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Eq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 <typ.UInt32> [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Eq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
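
The long Eq32 rules above all serve one optimization: once earlier rules have strength-reduced x/c into a multiply-high-and-shift sequence, an expression of the form x == c*(x/c), i.e. a divisibility check x%c == 0, is rewritten into a single multiply, rotate, and unsigned comparison (Leq32U). The magic constants come from umagic32/udivisible32 (and smagic32/sdivisible32 for the signed variants). Below is a minimal, self-contained sketch of the unsigned identity these rules rely on, with the constants worked out by hand for c = 6 rather than taken from those helpers; it only illustrates the arithmetic, not the exact generated code.

// Sketch of the identity behind the unsigned "udivisible" rewrites above:
// for c = 6 = 3<<1, with m the multiplicative inverse of 3 mod 2^32,
//   x%6 == 0  <=>  rotr32(x*m, 1) <= (2^32-1)/6.
// The rules spell rotr by k as RotateLeft32 by 32-k.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const (
		c   = uint32(6)
		m   = uint32(0xAAAAAAAB) // inverse of 3 (the odd part of 6) mod 2^32, computed by hand
		k   = 1                  // 6 = 3 << 1
		max = ^uint32(0) / c
	)
	for x := uint32(0); x < 1_000_000; x++ {
		fast := bits.RotateLeft32(x*m, -k) <= max // rotate right by k, then one unsigned bound check
		if fast != (x%c == 0) {
			fmt.Println("mismatch at", x)
			return
		}
	}
	fmt.Println("identity holds on the tested range")
}

Intuitively, multiplying by the inverse of the odd part sends multiples of c to small values with k low zero bits; the rotate moves those k bits to the top, so one unsigned bound check covers both the odd factor and the power-of-two factor of c.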
+func rewriteValuegeneric_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Eq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Eq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Eq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(udivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(udivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to8 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(sdivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(sdivisible8(c).a)]) ) (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(sdivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Eq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
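
The Lsh/Rsh round-trip rules (shown here for Eq8, and in the same shape for the wider widths above) match the expanded form of a signed remainder-by-power-of-two check, n == (n/(1<<k))*(1<<k), and reduce it to a mask test against the k low bits. A small sketch of the arithmetic fact that makes the rewrite valid, with k = 3 chosen arbitrarily:

// Sketch (not compiler code): for signed n, divisibility by a power of two can be
// tested with a mask, because Go's % truncates toward zero and divisibility
// does not depend on sign.
package main

import "fmt"

func main() {
	const k = 3 // test divisibility by 1<<3 = 8
	for n := int32(-64); n <= 64; n++ {
		slow := n%(1<<k) == 0     // the divisibility check the matched expression encodes
		fast := n&(1<<k-1) == 0   // the mask test the rewrite produces
		if slow != fast {
			fmt.Println("mismatch at", n)
			return
		}
	}
	fmt.Println("mask test agrees with % on the tested range")
}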
+func rewriteValuegeneric_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [false]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [true]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqInter x y)
+ // result: (EqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (EqPtr (Addr {a} _) (Addr {b} _))
+ // result: (ConstBool [a == b])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _)))
+ // result: (ConstBool [a == b && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _)))
+ // result: (ConstBool [a == b && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _))
+ // result: (ConstBool [a == b])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _)))
+ // result: (ConstBool [a == b && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _)))
+ // result: (ConstBool [a == b && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a == b && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (Not (IsNonNil o1))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(o1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (ConstNil) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
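
The EqPtr rules fold comparisons whose operands are built from statically known bases (Addr, LocalAddr) and constant offsets (OffPtr): the same base with the same offset compares equal, distinct constant offsets into the same base compare unequal, a local address never equals a global one, and a nil or zero constant on one side reduces the comparison to an IsNonNil test on the other. A rough source-level sketch, not taken from the compiler's tests, of the kind of comparison the OffPtr/OffPtr rule can decide:

// Illustrative example: both operands are constant offsets from the same base
// pointer, so the comparison depends only on the offsets and is always false.
// Whether this exact program is folded by this rule depends on earlier passes.
package main

import "fmt"

type pair struct{ a, b int64 }

func main() {
	p := &pair{}
	fmt.Println(&p.a == &p.b) // offsets 0 and 8 from the same base: prints false
}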
+func rewriteValuegeneric_OpEqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqSlice x y)
+ // result: (EqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpIMake(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IMake typ (StructMake1 val))
+ // result: (IMake typ val)
+ for {
+ typ := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(typ, val)
+ return true
+ }
+ // match: (IMake typ (ArrayMake1 val))
+ // result: (IMake typ val)
+ for {
+ typ := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(typ, val)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpInterCall(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem)
+ // cond: devirt(v, auxCall, itab, off) != nil
+ // result: (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
+ for {
+ argsize := auxIntToInt32(v.AuxInt)
+ auxCall := auxToCall(v.Aux)
+ if v_0.Op != OpLoad {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ itab := auxToSym(v_0_0_0_0_0.Aux)
+ v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
+ if v_0_0_0_0_0_0.Op != OpSB {
+ break
+ }
+ mem := v_1
+ if !(devirt(v, auxCall, itab, off) != nil) {
+ break
+ }
+ v.reset(OpStaticCall)
+ v.AuxInt = int32ToAuxInt(int32(argsize))
+ v.Aux = callToAux(devirt(v, auxCall, itab, off))
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpInterLECall(v *Value) bool {
+ // match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
+ // cond: devirtLESym(v, auxCall, itab, off) != nil
+ // result: devirtLECall(v, devirtLESym(v, auxCall, itab, off))
+ for {
+ if len(v.Args) < 1 {
+ break
+ }
+ auxCall := auxToCall(v.Aux)
+ v_0 := v.Args[0]
+ if v_0.Op != OpLoad {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ itab := auxToSym(v_0_0_0_0_0.Aux)
+ v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
+ if v_0_0_0_0_0_0.Op != OpSB || !(devirtLESym(v, auxCall, itab, off) != nil) {
+ break
+ }
+ v.copyOf(devirtLECall(v, devirtLESym(v, auxCall, itab, off)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 _) (Const32 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 _) (Const64 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d]))
+ // cond: 0 <= c && int16(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && int16(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Mod32u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod32u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Mod64u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod64u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 64 && 1<<uint(64-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 64 && 1<<uint(64-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (IsNonNil (ConstNil))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpConstNil {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsNonNil (Const32 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Const64 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Addr _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsNonNil (LocalAddr _ _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpLocalAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsSliceInBounds x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (SliceLen x) (SliceCap x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpSliceLen {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpSliceCap || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq16 (Const16 [0]) (And16 _ (Const16 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) <= uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) <= uint16(d))
+ return true
+ }
+ // match: (Leq16U (Const16 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq32 (Const32 [0]) (And32 _ (Const32 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) <= uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) <= uint32(d))
+ return true
+ }
+ // match: (Leq32U (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq64 (Const64 [0]) (And64 _ (Const64 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) <= uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) <= uint64(d))
+ return true
+ }
+ // match: (Leq64U (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq8 (Const8 [0]) (And8 _ (Const8 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) <= uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) <= uint8(d))
+ return true
+ }
+ // match: (Leq8U (Const8 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) < uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) < uint16(d))
+ return true
+ }
+ // match: (Less16U _ (Const16 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) < uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) < uint32(d))
+ return true
+ }
+ // match: (Less32U _ (Const32 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) < uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) < uint64(d))
+ return true
+ }
+ // match: (Less64U _ (Const64 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) < uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) < uint8(d))
+ return true
+ }
+ // match: (Less8U _ (Const8 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (Load <t1> p1 (Store {t2} p2 x _))
+ // cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ x := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ // cond: isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ x := v_1_2.Args[1]
+ p3 := v_1_2.Args[0]
+ if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ // cond: isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ x := v_1_2_2.Args[1]
+ p4 := v_1_2_2.Args[0]
+ if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ // cond: isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ x := v_1_2_2_2.Args[1]
+ p5 := v_1_2_2_2.Args[0]
+ if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
+ // result: (Const64F [math.Float64frombits(uint64(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
+ // result: (Const32F [math.Float32frombits(uint32(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(math.Float32frombits(uint32(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)
+ // result: (Const64 [int64(math.Float64bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64F {
+ break
+ }
+ x := auxIntToFloat64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)
+ // result: (Const32 [int32(math.Float32bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32F {
+ break
+ }
+ x := auxIntToFloat32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(math.Float32bits(x)))
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ mem := v_1.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p3 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p3)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ mem := v_1_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p4 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p4)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ mem := v_1_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p5 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p5)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ _ = v_1_2_2_2.Args[2]
+ p5 := v_1_2_2_2.Args[0]
+ mem := v_1_2_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p6 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p6)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1
+ // result: (ConstBool [false])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1
+ // result: (Const8 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2
+ // result: (Const16 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
+ // result: (StructMake0)
+ for {
+ t := v.Type
+ if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
+ // result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
+ // result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake2)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
+ // result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake3)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v.AddArg3(v0, v2, v4)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
+ // result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake4)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3))
+ v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v7.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v7.AddArg(ptr)
+ v6.AddArg2(v7, mem)
+ v.AddArg4(v0, v2, v4, v6)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsArray() && t.NumElem() == 0
+ // result: (ArrayMake0)
+ for {
+ t := v.Type
+ if !(t.IsArray() && t.NumElem() == 0) {
+ break
+ }
+ v.reset(OpArrayMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
+ // result: (ArrayMake1 (Load <t.Elem()> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpArrayMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem())
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x (Const16 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x (Const32 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh16x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
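+	// The two rules above merge shift chains: (x << c) << d becomes
+	// x << (c+d) provided the unsigned sum does not wrap (uaddOvf), and a
+	// left shift of an unsigned right shift of a left shift collapses to a
+	// single shift by c1-c2+c3, since with c3 >= c2 the bits cleared by the
+	// intermediate right shift are shifted back out of the word anyway.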
+ return false
+}
+func rewriteValuegeneric_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x (Const8 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x (Const16 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x (Const32 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh32x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x (Const8 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x (Const16 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x (Const32 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh64x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x (Const8 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x (Const16 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x (Const32 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh8x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x (Const8 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c % d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Mod16 <t> n (Const16 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpMod16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ // result: (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
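+	// Negating a negative divisor above is safe because the result of Go's %
+	// takes its sign from the dividend, e.g. 7 % -3 == 7 % 3 == 1. The final
+	// rule lowers the remaining cases to the equivalent x - (x/c)*c form.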
+ return false
+}
+func rewriteValuegeneric_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c) % uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) % uint16(d)))
+ return true
+ }
+ // match: (Mod16u <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16u <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ // result: (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && c > 0 && umagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16u, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
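+	// For unsigned modulus a power-of-two divisor always reduces to a mask
+	// (n % 8 == n & 7), with no non-negativity requirement; other divisors
+	// accepted by umagicOK16 are lowered to x - (x/c)*c so the unsigned
+	// division can then be strength-reduced.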
+ return false
+}
+func rewriteValuegeneric_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c % d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Mod32 <t> n (Const32 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpMod32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ // result: (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c) % uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ // match: (Mod32u <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32u <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ // result: (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && c > 0 && umagicOK32(c)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32u, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c % d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: n
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.copyOf(n)
+ return true
+ }
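+	// As a signed constant, -1<<63 has magnitude 1<<63, which is larger than
+	// any non-negative int64, so the remainder is n itself; this divisor is
+	// not covered by the power-of-two rule above.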
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Mod64 <t> n (Const64 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpMod64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ // result: (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c) % uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [-1<<63]))
+ // result: (And64 n (Const64 <t> [1<<63-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1<<63 - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
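+	// Interpreted as unsigned, -1<<63 is 1<<63, so the remainder is simply
+	// the low 63 bits: n & (1<<63 - 1).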
+ // match: (Mod64u <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ // result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && c > 0 && umagicOK64(c)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64u, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c % d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Mod8 <t> n (Const8 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpMod8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> x (Const8 [c]))
+ // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ // result: (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c) % uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) % uint8(d)))
+ return true
+ }
+ // match: (Mod8u <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8u <t> x (Const8 [c]))
+	// cond: x.Op != OpConst8 && c > 0 && umagicOK8(c)
+ // result: (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && c > 0 && umagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8u, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
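+// The Move rules below eliminate or shrink memory copies: a copy whose source
+// was just zeroed (or is a read-only all-zero symbol) becomes a Zero of the
+// destination, stores and copies that this Move fully overwrites are dropped
+// when they have no other uses, and a small copy whose source was just
+// assembled from individual stores (possibly on top of zeroed memory) is
+// replaced by the same stores applied directly to the destination.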
+func rewriteValuegeneric_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _))
+ // cond: isSamePtr(src, dst2)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpZero || auxIntToInt64(mem.AuxInt) != n || auxToType(mem.Aux) != t {
+ break
+ }
+ dst2 := mem.Args[0]
+ if !(isSamePtr(src, dst2)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _)))
+ // cond: isSamePtr(src, dst0)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpZero || auxIntToInt64(mem_0.AuxInt) != n || auxToType(mem_0.Aux) != t {
+ break
+ }
+ dst0 := mem_0.Args[0]
+ if !(isSamePtr(src, dst0)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst (Addr {sym} (SB)) mem)
+ // cond: symIsROZero(sym)
+ // result: (Zero {t} [n] dst mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsROZero(sym)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst, mem)
+ return true
+ }
+ // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ // cond: isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)
+ // result: (Move {t1} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ store := v_2
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ op := store.Args[0]
+ if op.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op.AuxInt)
+ dst2 := op.Args[0]
+ if !(isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2+t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ move := v_2
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ // cond: zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ zero := v_2
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ // cond: zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ zero := vardef.Args[0]
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ d2 := mem_2.Args[1]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ d3 := mem_2_2.Args[1]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ d4 := mem_2_2_2.Args[1]
+ op5 := mem_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ d2 := mem_0_2.Args[1]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ d3 := mem_0_2_2.Args[1]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ d4 := mem_0_2_2_2.Args[1]
+ op5 := mem_0_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero || auxIntToInt64(mem_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ p3 := mem_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero || auxIntToInt64(mem_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ p4 := mem_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ p5 := mem_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ _ = mem_2_2_2.Args[2]
+ mem_2_2_2_0 := mem_2_2_2.Args[0]
+ if mem_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_2_2_2_0.AuxInt)
+ p5 := mem_2_2_2_0.Args[0]
+ d4 := mem_2_2_2.Args[1]
+ mem_2_2_2_2 := mem_2_2_2.Args[2]
+ if mem_2_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_2_2_2_2.Aux)
+ p6 := mem_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpZero || auxIntToInt64(mem_0_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ p3 := mem_0_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ p4 := mem_0_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ p5 := mem_0_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _)))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ _ = mem_0_2_2_2.Args[2]
+ mem_0_2_2_2_0 := mem_0_2_2_2.Args[0]
+ if mem_0_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_0_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_0_2_2_2_0.AuxInt)
+ p5 := mem_0_2_2_2_0.Args[0]
+ d4 := mem_0_2_2_2.Args[1]
+ mem_0_2_2_2_2 := mem_0_2_2_2.Args[2]
+ if mem_0_2_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_0_2_2_2_2.Aux)
+ p6 := mem_0_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
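+	// This elides the copy through the stack temporary: data moves straight from src to dst, and midmem (the inner move) is kept as the memory argument.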
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpMove || auxIntToInt64(midmem.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem.Aux)
+ src := midmem.Args[1]
+ tmp2 := midmem.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpVarDef {
+ break
+ }
+ midmem_0 := midmem.Args[0]
+ if midmem_0.Op != OpMove || auxIntToInt64(midmem_0.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem_0.Aux)
+ src := midmem_0.Args[1]
+ tmp2 := midmem_0.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move dst src mem)
+ // cond: isSamePtr(dst, src)
+ // result: mem
+ for {
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(isSamePtr(dst, src)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [-1]) x)
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
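+	// Strength reduction: a multiply by a power of two becomes a shift, e.g. x*8 -> x<<3 since log16(8) = 3.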
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ continue
+ }
+ v.reset(OpLsh16x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo16(-c)
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
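+	// Same as above for a signed multiply by a negative power of two, e.g. x*-8 -> -(x<<3).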
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo16(-c)) {
+ continue
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log16(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Mul16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Mul16 i (Mul16 <t> x z))
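+	// Reassociate so the constant lands in the outer multiply, where the following rule can fold it with another constant.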
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x))
+ // result: (Mul16 (Const16 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [-1]) x)
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpLsh32x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo32(-c)
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo32(-c)) {
+ continue
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log32(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Mul32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Mul32 i (Mul32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x))
+ // result: (Mul32 (Const32 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul32F (Const32F [c]) (Const32F [d]))
+ // cond: c*d == c*d
+ // result: (Const32F [c*d])
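+	// c*d == c*d is false only when c*d is NaN, so a NaN product is not constant-folded.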
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [-1]))
+ // result: (Neg32F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [2]))
+ // result: (Add32F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd32F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [-1]) x)
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpLsh64x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo64(-c)
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo64(-c)) {
+ continue
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log64(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Mul64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Mul64 i (Mul64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x))
+ // result: (Mul64 (Const64 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul64F (Const64F [c]) (Const64F [d]))
+ // cond: c*d == c*d
+ // result: (Const64F [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [-1]))
+ // result: (Neg64F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [2]))
+ // result: (Add64F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd64F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [-1]) x)
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ continue
+ }
+ v.reset(OpLsh8x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo8(-c)
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo8(-c)) {
+ continue
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log8(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Mul8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Mul8 i (Mul8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x))
+ // result: (Mul8 (Const8 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg16 (Const16 [c]))
+ // result: (Const16 [-c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg16 (Sub16 x y))
+ // result: (Sub16 y x)
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg16 (Neg16 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg16 <t> (Com16 x))
+ // result: (Add16 (Const16 <t> [1]) x)
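+	// Two's-complement identity: -(^x) == x+1.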
+ for {
+ t := v.Type
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg32 (Const32 [c]))
+ // result: (Const32 [-c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg32 (Sub32 x y))
+ // result: (Sub32 y x)
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg32 (Neg32 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg32 <t> (Com32 x))
+ // result: (Add32 (Const32 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32F (Const32F [c]))
+ // cond: c != 0
+ // result: (Const32F [-c])
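+	// The fold is skipped when c is zero (note -0.0 compares equal to 0.0 here), leaving negation of a floating-point zero to run time.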
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 (Const64 [c]))
+ // result: (Const64 [-c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg64 (Sub64 x y))
+ // result: (Sub64 y x)
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg64 (Neg64 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg64 <t> (Com64 x))
+ // result: (Add64 (Const64 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg64F (Const64F [c]))
+ // cond: c != 0
+ // result: (Const64F [-c])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg8 (Const8 [c]))
+ // result: (Const8 [-c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg8 (Sub8 x y))
+ // result: (Sub8 y x)
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg8 (Neg8 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg8 <t> (Com8 x))
+ // result: (Add8 (Const8 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Neq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Neq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
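+	// The shift/add tree matched on the right is (n / (1<<k)) * (1<<k) (signed division by a power of two, rounded toward zero), so the comparison is a divisibility test on the low k bits of n.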
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
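+	// With y a single bit set, x&y is either 0 or y, so x&y != y is equivalent to x&y == 0.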
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Neq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Neq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Neq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Neq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Neq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Neq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [false]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [true]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (Not x) (Not y))
+ // result: (NeqB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNot {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNot {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
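+	// Annotation (editorial, not part of the rulegen-generated file): the NeqB rules above
+	// are the usual boolean identities: constants fold, (false != x) is x, (true != x) is !x,
+	// and (!x != !y) is (x != y).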
+ return false
+}
+func rewriteValuegeneric_OpNeqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqInter x y)
+ // result: (NeqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqPtr x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (NeqPtr (Addr {a} _) (Addr {b} _))
+ // result: (ConstBool [a != b])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Addr {a} _) (OffPtr [o] (Addr {b} _)))
+ // result: (ConstBool [a != b || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (Addr {a} _)) (OffPtr [o2] (Addr {b} _)))
+ // result: (ConstBool [a != b || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ a := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {a} _ _) (LocalAddr {b} _ _))
+ // result: (ConstBool [a != b])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {a} _ _) (OffPtr [o] (LocalAddr {b} _ _)))
+ // result: (ConstBool [a != b || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (LocalAddr {a} _ _)) (OffPtr [o2] (LocalAddr {b} _ _)))
+ // result: (ConstBool [a != b || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ a := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ b := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(a != b || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (IsNonNil o1)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpIsNonNil)
+ v.AddArg(o1)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (ConstNil) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
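+	// Annotation (editorial, not part of the rulegen-generated file): the three rules above
+	// handle comparison against a nil pointer constant (ConstNil, or an all-zero
+	// Const32/Const64 in the pointer width) by reducing it to an explicit non-nil test,
+	// IsNonNil p.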
+ return false
+}
+func rewriteValuegeneric_OpNeqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqSlice x y)
+ // result: (NeqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNilCheck(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ // match: (NilCheck (GetG mem) mem)
+ // result: mem
+ for {
+ if v_0.Op != OpGetG {
+ break
+ }
+ mem := v_0.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
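+	// Annotation (editorial, not part of the rulegen-generated file): the g pointer
+	// produced by GetG is never nil, so a NilCheck of it collapses to its memory argument.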
+ // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
+ // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpSP {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpStaticCall {
+ break
+ }
+ sym := auxToCall(v_0_1.Aux)
+ if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
+ // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLoad {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpOffPtr {
+ break
+ }
+ c := auxIntToInt64(v_0_0_0.AuxInt)
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpSP {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpStaticCall {
+ break
+ }
+ sym := auxToCall(v_0_0_1.Aux)
+ if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
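+	// Annotation (editorial, not part of the rulegen-generated file): the rules above drop
+	// nil checks on pointers that come straight from runtime.newobject, either loaded from
+	// the call's stack result slot or taken via SelectN of a StaticLECall, since newobject
+	// never returns nil. When the checknil debug flag is set, warnRule in the condition
+	// reports "removed nil check".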
+ return false
+}
+func rewriteValuegeneric_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not (ConstBool [c]))
+ // result: (ConstBool [!c])
+ for {
+ if v_0.Op != OpConstBool {
+ break
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(!c)
+ return true
+ }
+ // match: (Not (Eq64 x y))
+ // result: (Neq64 x y)
+ for {
+ if v_0.Op != OpEq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32 x y))
+ // result: (Neq32 x y)
+ for {
+ if v_0.Op != OpEq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq16 x y))
+ // result: (Neq16 x y)
+ for {
+ if v_0.Op != OpEq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq8 x y))
+ // result: (Neq8 x y)
+ for {
+ if v_0.Op != OpEq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqB x y))
+ // result: (NeqB x y)
+ for {
+ if v_0.Op != OpEqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqPtr x y))
+ // result: (NeqPtr x y)
+ for {
+ if v_0.Op != OpEqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq64F x y))
+ // result: (Neq64F x y)
+ for {
+ if v_0.Op != OpEq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32F x y))
+ // result: (Neq32F x y)
+ for {
+ if v_0.Op != OpEq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64 x y))
+ // result: (Eq64 x y)
+ for {
+ if v_0.Op != OpNeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32 x y))
+ // result: (Eq32 x y)
+ for {
+ if v_0.Op != OpNeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq16 x y))
+ // result: (Eq16 x y)
+ for {
+ if v_0.Op != OpNeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq8 x y))
+ // result: (Eq8 x y)
+ for {
+ if v_0.Op != OpNeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqB x y))
+ // result: (EqB x y)
+ for {
+ if v_0.Op != OpNeqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqPtr x y))
+ // result: (EqPtr x y)
+ for {
+ if v_0.Op != OpNeqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64F x y))
+ // result: (Eq64F x y)
+ for {
+ if v_0.Op != OpNeq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32F x y))
+ // result: (Eq32F x y)
+ for {
+ if v_0.Op != OpNeq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Less64 x y))
+ // result: (Leq64 y x)
+ for {
+ if v_0.Op != OpLess64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32 x y))
+ // result: (Leq32 y x)
+ for {
+ if v_0.Op != OpLess32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16 x y))
+ // result: (Leq16 y x)
+ for {
+ if v_0.Op != OpLess16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8 x y))
+ // result: (Leq8 y x)
+ for {
+ if v_0.Op != OpLess8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less64U x y))
+ // result: (Leq64U y x)
+ for {
+ if v_0.Op != OpLess64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32U x y))
+ // result: (Leq32U y x)
+ for {
+ if v_0.Op != OpLess32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16U x y))
+ // result: (Leq16U y x)
+ for {
+ if v_0.Op != OpLess16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8U x y))
+ // result: (Leq8U y x)
+ for {
+ if v_0.Op != OpLess8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64 x y))
+ // result: (Less64 y x)
+ for {
+ if v_0.Op != OpLeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32 x y))
+ // result: (Less32 y x)
+ for {
+ if v_0.Op != OpLeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16 x y))
+ // result: (Less16 y x)
+ for {
+ if v_0.Op != OpLeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8 x y))
+ // result: (Less8 y x)
+ for {
+ if v_0.Op != OpLeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64U x y))
+ // result: (Less64U y x)
+ for {
+ if v_0.Op != OpLeq64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32U x y))
+ // result: (Less32U y x)
+ for {
+ if v_0.Op != OpLeq32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16U x y))
+ // result: (Less16U y x)
+ for {
+ if v_0.Op != OpLeq16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8U x y))
+ // result: (Less8U y x)
+ for {
+ if v_0.Op != OpLeq8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8U)
+ v.AddArg2(y, x)
+ return true
+ }
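+	// Annotation (editorial, not part of the rulegen-generated file): the Not rules above
+	// fold a constant operand and otherwise push the negation into the comparison:
+	// !(x == y) becomes x != y (and vice versa), !(x < y) becomes y <= x, and !(x <= y)
+	// becomes y < x, for every width and signedness plus the float and pointer variants.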
+ return false
+}
+func rewriteValuegeneric_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr (OffPtr p [b]) [a])
+ // result: (OffPtr p [a+b])
+ for {
+ a := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ b := auxIntToInt64(v_0.AuxInt)
+ p := v_0.Args[0]
+ v.reset(OpOffPtr)
+ v.AuxInt = int64ToAuxInt(a + b)
+ v.AddArg(p)
+ return true
+ }
+ // match: (OffPtr p [0])
+ // cond: v.Type.Compare(p.Type) == types.CMPeq
+ // result: p
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ p := v_0
+ if !(v.Type.Compare(p.Type) == types.CMPeq) {
+ break
+ }
+ v.copyOf(p)
+ return true
+ }
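+	// Annotation (editorial, not part of the rulegen-generated file): nested OffPtrs simply
+	// add their offsets, e.g. OffPtr [8] (OffPtr [16] p) becomes OffPtr [24] p, and an
+	// OffPtr by 0 whose type matches its operand is dropped entirely.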
+ return false
+}
+func rewriteValuegeneric_OpOr16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or16 (Const16 [-1]) _)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x (Or16 x y))
+ // result: (Or16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or16 (Const16 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c2 := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt16(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Or16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Or16 i (Or16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpOr16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x))
+ // result: (Or16 (Const16 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
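+	// Annotation (editorial, not part of the rulegen-generated file): the Or16 rules above
+	// are the standard bitwise-or simplifications: fold two constants, x|x = x, x|0 = x,
+	// x|-1 = -1, x|(x|y) = x|y, and reassociation so that constant operands end up grouped
+	// together. The same rule set is repeated below for Or32, Or64 and Or8.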
+ return false
+}
+func rewriteValuegeneric_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or32 (Const32 [-1]) _)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x (Or32 x y))
+ // result: (Or32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or32 (Const32 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 {
+ continue
+ }
+ c2 := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt32(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Or32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Or32 i (Or32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpOr32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x))
+ // result: (Or32 (Const32 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or64 (Const64 [-1]) _)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x (Or64 x y))
+ // result: (Or64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or64 (Const64 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 {
+ continue
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt64(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Or64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Or64 i (Or64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpOr64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x))
+ // result: (Or64 (Const64 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or8 (Const8 [-1]) _)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x (Or8 x y))
+ // result: (Or8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or8 (Const8 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c2 := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt8(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Or8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Or8 i (Or8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x))
+ // result: (Or8 (Const8 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOrB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
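+	// Annotation (editorial, not part of the rulegen-generated file): the OrB rules in this
+	// function fuse a pair of signed range checks into one unsigned comparison. In the rule
+	// above, c < x || x < d with c >= d means x lies outside [d, c], which is exactly
+	// c-d <u x-d; e.g. 9 < x || x < 0 becomes 9 <u x, i.e. uint64(x) > 9. The remaining
+	// blocks are the Leq and unsigned variants of the same transformation for each width.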
+ // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ return false
+}
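
The OrB rules above fuse a disjunction of two bound checks into a single unsigned comparison: roughly, "c < x || x < d" (with c >= d) becomes "c-d < x-d" computed unsigned, and the Leq variants handle the inclusive edges with the extra d+1 > d guard against overflow. A minimal source-level sketch of the unsigned case (the function names here are illustrative, not part of the compiler):

package main

import "fmt"

// outside is the naive shape the rules match: x lies outside [lo, hi].
func outside(x, lo, hi uint64) bool { return x < lo || hi < x }

// fused is the shape the rewrite produces: one subtraction plus one
// unsigned compare (valid when hi >= lo, mirroring the rule's cond).
func fused(x, lo, hi uint64) bool { return x-lo > hi-lo }

func main() {
	for _, x := range []uint64{0, 9, 10, 55, 100, 101, ^uint64(0)} {
		fmt.Println(x, outside(x, 10, 100) == fused(x, 10, 100)) // always true
	}
}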
+func rewriteValuegeneric_OpPhi(v *Value) bool {
+ // match: (Phi (Const8 [c]) (Const8 [c]))
+ // result: (Const8 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const16 [c]) (Const16 [c]))
+ // result: (Const16 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const32 [c]) (Const32 [c]))
+ // result: (Const32 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const64 [c]) (Const64 [c]))
+ // result: (Const64 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
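
The Phi rules collapse a two-input phi whose inputs are the identical constant into that constant. A hedged source-level shape that yields such a phi (in practice another pass may fold this particular snippet first, but the merge-point pattern is the one these rules target):

package main

import "fmt"

// sameConst assigns the same constant on both branches, so the merge
// point's phi has two identical Const32 inputs and reduces to 7.
func sameConst(cond bool) int32 {
	var y int32
	if cond {
		y = 7
	} else {
		y = 7
	}
	return y
}

func main() { fmt.Println(sameConst(true), sameConst(false)) }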
+func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 4 && is32Bit(t.Elem().Size())
+ // result: (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size()))
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 8
+ // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size())
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ return false
+}
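
PtrIndex is lowered to a pointer add of index times element size, using 32- or 64-bit multiply depending on config.PtrSize. A rough user-level analogue of the 64-bit case, written with package unsafe purely for illustration:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	xs := [4]int64{10, 20, 30, 40}
	p := unsafe.Pointer(&xs[0])
	i := uintptr(2)
	elem := unsafe.Sizeof(xs[0])         // plays the role of t.Elem().Size()
	q := (*int64)(unsafe.Add(p, i*elem)) // AddPtr ptr (Mul64 idx size)
	fmt.Println(*q)                      // 30
}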
+func rewriteValuegeneric_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft16 x (Const16 [c]))
+ // cond: c%16 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c%16 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (Const32 [c]))
+ // cond: c%32 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%32 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft64 x (Const64 [c]))
+ // cond: c%64 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%64 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft8 x (Const8 [c]))
+ // cond: c%8 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c%8 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
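
The four RotateLeft rules drop a rotation whose constant count is a multiple of the operand width, since rotating an N-bit value by a multiple of N is the identity. A quick check with math/bits:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var x uint16 = 0xBEEF
	fmt.Println(bits.RotateLeft16(x, 16) == x) // rotating by the full width is a no-op
	fmt.Println(bits.RotateLeft16(x, 0) == x)  // c%16 == 0 also covers zero
	var y uint8 = 0xA5
	fmt.Println(bits.RotateLeft8(y, 8) == y)
}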
+func rewriteValuegeneric_OpRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round32F x:(Const32F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst32F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round64F x:(Const64F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst64F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x (Const16 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x (Const32 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [int16(uint16(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh16Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16Ux64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15]))
+ // result: (Rsh16Ux64 x (Const64 <t> [15]))
+ for {
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh16Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpZeroExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
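
The unsigned 16-bit shift rules above fold constant operands, turn a shift by 16 or more into zero, merge stacked shifts when the counts add without overflow, exploit the fact that an arithmetic shift never changes the sign bit (the Rsh16x64-by-15 case), and recognize the shift-left-then-right pair as zero extension of the low byte. A small check of that last identity:

package main

import "fmt"

func main() {
	var x uint16 = 0xABCD
	masked := (x << 8) >> 8      // the Lsh16x64/Rsh16Ux64 pair matched above
	extended := uint16(uint8(x)) // ZeroExt8to16 (Trunc16to8 x)
	fmt.Printf("%#x %v\n", masked, masked == extended) // 0xcd true
}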
+func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x (Const8 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x (Const16 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x (Const32 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpSignExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
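
The signed counterpart: the Lsh16x64/Rsh16x64 pair by 8 is recognized as sign-extending the low byte of a 16-bit value. Illustration at the source level:

package main

import "fmt"

func main() {
	var x int16 = 0x2A80 // low byte has its top bit set
	fmt.Println((x<<8)>>8 == int16(int8(x))) // true: SignExt8to16 (Trunc16to8 x)
	x = 0x2A7F
	fmt.Println((x<<8)>>8 == int16(int8(x))) // true for a positive low byte too
}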
+func rewriteValuegeneric_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x (Const8 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x (Const16 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x (Const32 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [int32(uint32(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh32Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32Ux64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31]))
+ // result: (Rsh32Ux64 x (Const64 <t> [31]))
+ for {
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh32Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpZeroExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpZeroExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x (Const8 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x (Const16 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x (Const32 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpSignExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpSignExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x (Const8 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x (Const16 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x (Const32 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [int64(uint64(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh64Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64Ux64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63]))
+ // result: (Rsh64Ux64 x (Const64 <t> [63]))
+ for {
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh64Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpZeroExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpZeroExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
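
Two of the Rsh64Ux64 rules are easy to confirm from source: stacked unsigned shifts merge into a single shift by the summed count (guarded by !uaddOvf), and any total count of 64 or more folds to zero.

package main

import "fmt"

func main() {
	var x uint64 = 0xDEADBEEFCAFEF00D
	fmt.Println((x>>3)>>5 == x>>8) // shifts merge into one shift by c+d
	fmt.Println((x>>60)>>10 == 0)  // a combined count >= 64 produces zero
}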
+func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x (Const8 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x (Const16 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x (Const32 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpSignExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpSignExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (SignExt32to64 (Trunc64to32 <typ.Int32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x (Const8 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x (Const16 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x (Const32 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [int8(uint8(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh8Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8Ux64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] ))
+ // result: (Rsh8Ux64 x (Const64 <t> [7] ))
+ for {
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh8Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x (Const8 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x (Const16 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x (Const32 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x (Const8 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (Div128u (Const64 [0]) lo y))
+ // result: (Div64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpDiv64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (Div128u (Const64 [0]) lo y))
+ // result: (Mod64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpMod64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] (MakeResult a ___))
+ // result: a
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 {
+ break
+ }
+ a := v_0.Args[0]
+ v.copyOf(a)
+ return true
+ }
+ // match: (SelectN [1] (MakeResult a b ___))
+ // result: b
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 {
+ break
+ }
+ b := v_0.Args[1]
+ v.copyOf(b)
+ return true
+ }
+ // match: (SelectN [2] (MakeResult a b c ___))
+ // result: c
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 {
+ break
+ }
+ c := v_0.Args[2]
+ v.copyOf(c)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(dst.Type.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(dst.Type.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to16 (Const8 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to32 (Const8 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to64 (Const8 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst64 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt64(v_0_2.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (Const32 <t> [c])))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst32 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt32(v_0_2.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceCap x)))
+ // result: (SliceCap x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceCap {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceCap)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceLen x)))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceLen {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (Const32 <t> [c]) _))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst32 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (SliceLen x) _))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpSliceLen {
+ break
+ }
+ x := v_0_1.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtr (SliceMake (SlicePtr x) _ _))
+ // result: (SlicePtr x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSlicePtr {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSlicePtr)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Slicemask (Const32 [x]))
+ // cond: x > 0
+ // result: (Const32 [-1])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const32 [0]))
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Slicemask (Const64 [x]))
+ // cond: x > 0
+ // result: (Const64 [-1])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const64 [0]))
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSqrt(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Sqrt (Const64F [c]))
+ // cond: !math.IsNaN(math.Sqrt(c))
+ // result: (Const64F [math.Sqrt(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Sqrt(c))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStaticCall(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)
+ // result: (Move {t.Elem()} [int64(sz)] dst src mem)
+ for {
+ sym := auxToCall(v.Aux)
+ s1 := v_0
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ t := auxToType(s3.Aux)
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(t.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)
+ // result: (Move {t.Elem()} [int64(sz)] dst src mem)
+ for {
+ sym := auxToCall(v.Aux)
+ s1 := v_0
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ t := auxToType(s3.Aux)
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(t.Elem())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (StaticCall {sym} x)
+ // cond: needRaceCleanup(sym, v)
+ // result: x
+ for {
+ sym := auxToCall(v.Aux)
+ x := v_0
+ if !(needRaceCleanup(sym, v)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon)
+ // result: (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8)
+ v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ // match: (Store {t1} p1 (Load <t2> p2 mem) mem)
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ mem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == t1.Size()) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ if oldmem != mem.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p5 := mem_2_2.Args[0]
+ if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ // cond: isConstZero(x) && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ // result: mem
+ for {
+ t := auxToType(v.Aux)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p2 := mem.Args[0]
+ if !(isConstZero(x) && o >= 0 && t.Size()+o <= n && isSamePtr(p1, p2)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2.AuxInt)
+ p3 := mem_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2.AuxInt)
+ p4 := mem_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p4 := mem_2_2.Args[0]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2_2.AuxInt)
+ p5 := mem_2_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store _ (StructMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpStructMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (StructMake1 <t> f0) mem)
+ // result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ t := v_1.Type
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(0))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(dst)
+ v.AddArg3(v0, f0, mem)
+ return true
+ }
+ // match: (Store dst (StructMake2 <t> f0 f1) mem)
+ // result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake2 {
+ break
+ }
+ t := v_1.Type
+ f1 := v_1.Args[1]
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(1))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(0))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, f0, mem)
+ v.AddArg3(v0, f1, v1)
+ return true
+ }
+ // match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+ // result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake3 {
+ break
+ }
+ t := v_1.Type
+ f2 := v_1.Args[2]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(2))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(1))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(0))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, f0, mem)
+ v1.AddArg3(v2, f1, v3)
+ v.AddArg3(v0, f2, v1)
+ return true
+ }
+ // match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+ // result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake4 {
+ break
+ }
+ t := v_1.Type
+ f3 := v_1.Args[3]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ f2 := v_1.Args[2]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(3))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(2))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(1))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t.FieldType(0))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, f0, mem)
+ v3.AddArg3(v4, f1, v5)
+ v1.AddArg3(v2, f2, v3)
+ v.AddArg3(v0, f3, v1)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) mem)
+ // cond: !fe.CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src mem)
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if mem != v_2 || !(!fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) (VarDef {x} mem))
+ // cond: !fe.CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src (VarDef {x} mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if v_2.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(v_2.Aux)
+ if mem != v_2.Args[0] || !(!fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Store _ (ArrayMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpArrayMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (ArrayMake1 e) mem)
+ // result: (Store {e.Type} dst e mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ e := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(e.Type)
+ v.AddArg3(dst, e, mem)
+ return true
+ }
+ // match: (Store (Load (OffPtr [c] (SP)) mem) x mem)
+ // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
+ // result: mem
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ mem := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpSP {
+ break
+ }
+ x := v_1
+ if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem)
+ // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
+ // result: mem
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLoad {
+ break
+ }
+ mem := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpOffPtr {
+ break
+ }
+ c := auxIntToInt64(v_0_0_0.AuxInt)
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpSP {
+ break
+ }
+ x := v_1
+ if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[2]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[2]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[2]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[1]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[1]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[1]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringLen (StringMake _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringPtr (StringMake (Addr <t> {s} base) _))
+ // result: (Addr <t> {s} base)
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ t := v_0_0.Type
+ s := auxToSym(v_0_0.Aux)
+ base := v_0_0.Args[0]
+ v.reset(OpAddr)
+ v.Type = t
+ v.Aux = symToAux(s)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStructSelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (StructSelect (StructMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpStructMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake2 x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake2 _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake3 x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake3 _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake3 _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake4 x _ _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake4 _ x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake4 _ _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [3] (StructMake4 _ _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[3]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [i] x:(Load <t> ptr mem))
+ // cond: !fe.CanSSA(t)
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(!fe.CanSSA(t)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (StructSelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c-d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub16 x (Const16 <t> [c]))
+ // cond: x.Op != OpConst16
+ // result: (Add16 (Const16 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Sub16 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Sub16 (Add16 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Add16 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 x (Sub16 i:(Const16 <t>) z))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Add16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub16 x (Add16 z i:(Const16 <t>)))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Sub16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 i (Add16 <t> z x))
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub16 (Add16 z i:(Const16 <t>)) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> z x))
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c-d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub32 x (Const32 <t> [c]))
+ // cond: x.Op != OpConst32
+ // result: (Add32 (Const32 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Sub32 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Sub32 (Add32 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Add32 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 x (Sub32 i:(Const32 <t>) z))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Add32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub32 x (Add32 z i:(Const32 <t>)))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Sub32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 i (Add32 <t> z x))
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub32 (Add32 z i:(Const32 <t>)) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> z x))
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub32F (Const32F [c]) (Const32F [d]))
+ // cond: c-d == c-d
+ // result: (Const32F [c-d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
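+ // c-d == c-d is false only when c-d is NaN, so NaN results are never constant-folded.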
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c-d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub64 x (Const64 <t> [c]))
+ // cond: x.Op != OpConst64
+ // result: (Add64 (Const64 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Sub64 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Sub64 (Add64 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Add64 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 x (Sub64 i:(Const64 <t>) z))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Add64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub64 x (Add64 z i:(Const64 <t>)))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Sub64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 i (Add64 <t> z x))
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub64 (Add64 z i:(Const64 <t>)) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> z x))
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub64F (Const64F [c]) (Const64F [d]))
+ // cond: c-d == c-d
+ // result: (Const64F [c-d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c-d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub8 x (Const8 <t> [c]))
+ // cond: x.Op != OpConst8
+ // result: (Add8 (Const8 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Sub8 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Sub8 (Add8 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Add8 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 x (Sub8 i:(Const8 <t>) z))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Add8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub8 x (Add8 z i:(Const8 <t>)))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Sub8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 i (Add8 <t> z x))
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub8 (Add8 z i:(Const8 <t>)) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> z x))
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 (Const16 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc16to8 (ZeroExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (SignExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (And16 (Const16 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc16to8 x)
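+ // (A mask whose low 8 bits are all ones cannot change the truncated result.)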
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ y := auxIntToInt16(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc16to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 (Const32 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt8to32 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt8to32 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (And32 (Const32 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc32to16 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc32to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 (Const32 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc32to8 (ZeroExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (SignExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc32to8 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc32to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 (Const64 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt8to64 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt8to64 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc64to16 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Const64 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt8to64 x))
+ // result: (ZeroExt8to32 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt16to64 x))
+ // result: (ZeroExt16to32 x)
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt8to64 x))
+ // result: (SignExt8to32 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt16to64 x))
+ // result: (SignExt16to32 x)
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFFFFFF == 0xFFFFFFFF
+ // result: (Trunc64to32 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 (Const64 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc64to8 (ZeroExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (SignExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (And64 (Const64 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc64to8 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc64to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Xor16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x (Xor16 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Xor16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Xor16 i (Xor16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpXor16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x))
+ // result: (Xor16 (Const16 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Xor32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x (Xor32 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Xor32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Xor32 i (Xor32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpXor32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x))
+ // result: (Xor32 (Const32 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Xor64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x (Xor64 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Xor64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Xor64 i (Xor64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpXor64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x))
+ // result: (Xor64 (Const64 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Xor8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x (Xor8 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Xor8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Xor8 i (Xor8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpXor8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x))
+ // result: (Xor8 (Const8 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Zero (Load (OffPtr [c] (SP)) mem) mem)
+ // cond: mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
+ // result: mem
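+ // (runtime.newobject returns zeroed memory, so re-zeroing the new object is redundant.)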
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ mem := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ mem := v_1
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ // cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store)
+ // result: (Zero {t1} [n] p1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ store := v_1
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ store_0 := store.Args[0]
+ if store_0.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(store_0.AuxInt)
+ p2 := store_0.Args[0]
+ if !(isSamePtr(p1, p2) && store.Uses == 1 && n >= o2+t2.Size() && clobber(store)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg2(p1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ move := v_1
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)
+ // result: (Zero {t} [n] dst1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg2(dst1, v0)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _))
+ // cond: isSamePtr(dst1, dst2)
+ // result: zero
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ zero := v_1
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != s || auxToType(zero.Aux) != t {
+ break
+ }
+ dst2 := zero.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(zero)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _)))
+ // cond: isSamePtr(dst1, dst2)
+ // result: vardef
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ vardef_0 := vardef.Args[0]
+ if vardef_0.Op != OpZero || auxIntToInt64(vardef_0.AuxInt) != s || auxToType(vardef_0.Aux) != t {
+ break
+ }
+ dst2 := vardef_0.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(vardef)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
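+ // (An unsigned right shift by 16 or more already cleared the upper 16 bits,
+ // so the truncate/zero-extend round trip is a no-op.)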
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(uint32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to16 (Const8 [c]))
+ // result: (Const16 [int16( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to32 (Const8 [c]))
+ // result: (Const32 [int32( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to64 (Const8 [c]))
+ // result: (Const64 [int64( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteBlockgeneric(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If (Not cond) yes no)
+ // result: (If cond no yes)
+ for b.Controls[0].Op == OpNot {
+ v_0 := b.Controls[0]
+ cond := v_0.Args[0]
+ b.resetWithControl(BlockIf, cond)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: c
+ // result: (First yes no)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: !c
+ // result: (First no yes)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(!c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
new file mode 100644
index 0000000..8facb91
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -0,0 +1,503 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "container/heap"
+ "sort"
+)
+
+const (
+ ScorePhi = iota // towards top of block
+ ScoreArg
+ ScoreNilCheck
+ ScoreReadTuple
+ ScoreVarDef
+ ScoreMemory
+ ScoreReadFlags
+ ScoreDefault
+ ScoreFlags
+ ScoreControl // towards bottom of block
+)
+
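+// ValHeap is a priority queue of values ordered by the score table built
+// in schedule. Values that should appear later in the block compare less
+// and are popped first; schedule assembles the block in reverse and flips
+// the order at the end.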
+type ValHeap struct {
+ a []*Value
+ score []int8
+}
+
+func (h ValHeap) Len() int { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ v := x.(*Value)
+ h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[0 : n-1]
+ return x
+}
+func (h ValHeap) Less(i, j int) bool {
+ x := h.a[i]
+ y := h.a[j]
+ sx := h.score[x.ID]
+ sy := h.score[y.ID]
+ if c := sx - sy; c != 0 {
+ return c > 0 // higher score comes later.
+ }
+ if x.Pos != y.Pos { // Favor in-order line stepping
+ return x.Pos.After(y.Pos)
+ }
+ if x.Op != OpPhi {
+ if c := len(x.Args) - len(y.Args); c != 0 {
+ return c < 0 // fewer args come later
+ }
+ }
+ if c := x.Uses - y.Uses; c != 0 {
+ return c < 0 // fewer uses come later
+ }
+ // These comparisons are fairly arbitrary.
+ // The goal here is stability in the face
+ // of unrelated changes elsewhere in the compiler.
+ if c := x.AuxInt - y.AuxInt; c != 0 {
+ return c > 0
+ }
+ if cmp := x.Type.Compare(y.Type); cmp != types.CMPeq {
+ return cmp == types.CMPgt
+ }
+ return x.ID > y.ID
+}
+
+func (op Op) isLoweredGetClosurePtr() bool {
+ switch op {
+ case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
+ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
+ OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr:
+ return true
+ }
+ return false
+}
+
+// Schedule the Values in each Block. After this phase returns, the
+// order of b.Values matters and is the order in which those values
+// will appear in the assembly output. For now it generates a
+// reasonable, valid schedule using a priority queue. TODO(khr):
+// schedule smarter.
+func schedule(f *Func) {
+ // For each value, the number of times it is used in the block
+ // by values that have not been scheduled yet.
+ uses := make([]int32, f.NumValues())
+
+ // reusable priority queue
+ priq := new(ValHeap)
+
+ // "priority" for a value
+ score := make([]int8, f.NumValues())
+
+ // scheduling order. We queue values in this list in reverse order.
+ // A constant bound allows this to be stack-allocated. 64 is
+ // enough to cover almost every schedule call.
+ order := make([]*Value, 0, 64)
+
+ // maps mem values to the next live memory value
+ nextMem := make([]*Value, f.NumValues())
+ // additional pretend arguments for each Value. Used to enforce load/store ordering.
+ additionalArgs := make([][]*Value, f.NumValues())
+
+ for _, b := range f.Blocks {
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
+ for _, v := range b.Values {
+ switch {
+ case v.Op.isLoweredGetClosurePtr():
+ // We also score LoweredGetClosurePtr as early as possible to ensure that the
+ // context register is not stomped. LoweredGetClosurePtr should only appear
+ // in the entry block where there are no phi functions, so there is no
+ // conflict or ambiguity here.
+ if b != f.Entry {
+ f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
+ v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
+ v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
+ v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
+ v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck:
+ // Nil checks must come before loads from the same address.
+ score[v.ID] = ScoreNilCheck
+ case v.Op == OpPhi:
+ // We want all the phis first.
+ score[v.ID] = ScorePhi
+ case v.Op == OpVarDef:
+ // We want all the vardefs next.
+ score[v.ID] = ScoreVarDef
+ case v.Op == OpArg:
+ // We want all the args as early as possible, for better debugging.
+ score[v.ID] = ScoreArg
+ case v.Type.IsMemory():
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure. It also helps make sure
+ // VARDEF ops are scheduled before the corresponding LEA.
+ score[v.ID] = ScoreMemory
+ case v.Op == OpSelect0 || v.Op == OpSelect1:
+ // Schedule the pseudo-op of reading part of a tuple
+ // immediately after the tuple-generating op, since
+ // this value is already live. This also removes its
+ // false dependency on the other part of the tuple.
+ // Also ensures tuple is never spilled.
+ score[v.ID] = ScoreReadTuple
+ case v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags():
+ // Schedule flag register generation as late as possible.
+ // This makes sure that we only have one live flags
+ // value at a time.
+ score[v.ID] = ScoreFlags
+ default:
+ score[v.ID] = ScoreDefault
+ // If we're reading flags, schedule earlier to keep flag lifetime short.
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ score[v.ID] = ScoreReadFlags
+ }
+ }
+ }
+ }
+ }
+
+ for _, b := range f.Blocks {
+ // Find store chain for block.
+ // Store chains for different blocks overwrite each other, so
+ // the calculated store chain is good only for this block.
+ for _, v := range b.Values {
+ if v.Op != OpPhi && v.Type.IsMemory() {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ nextMem[w.ID] = v
+ }
+ }
+ }
+ }
+
+ // Compute uses.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // If a value is used by a phi, it does not induce
+ // a scheduling edge because that use is from the
+ // previous iteration.
+ continue
+ }
+ for _, w := range v.Args {
+ if w.Block == b {
+ uses[w.ID]++
+ }
+ // Any load must come before the following store.
+ if !v.Type.IsMemory() && w.Type.IsMemory() {
+ // v is a load.
+ s := nextMem[w.ID]
+ if s == nil || s.Block != b {
+ continue
+ }
+ additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
+ uses[v.ID]++
+ }
+ }
+ }
+
+ for _, c := range b.ControlValues() {
+ // Force the control values to be scheduled at the end,
+ // unless they are phi values (which must be first).
+ // OpArg also goes first -- if it is on the stack it register-allocates
+ // to a LoadReg; if it is in a register it is there from the beginning anyway.
+ if c.Op == OpPhi || c.Op == OpArg {
+ continue
+ }
+ score[c.ID] = ScoreControl
+
+ // Schedule values dependent on the control values at the end.
+ // This reduces the number of register spills. We don't find
+ // all values that depend on the controls, just values with a
+ // direct dependency. This is cheaper and in testing there
+ // was no difference in the number of spills.
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ for _, a := range v.Args {
+ if a == c {
+ score[v.ID] = ScoreControl
+ }
+ }
+ }
+ }
+
+ }
+
+ // To put things into the priority queue:
+ // the values that should come last compare least.
+ priq.score = score
+ priq.a = priq.a[:0]
+
+ // Initialize priority queue with schedulable values.
+ for _, v := range b.Values {
+ if uses[v.ID] == 0 {
+ heap.Push(priq, v)
+ }
+ }
+
+ // Schedule highest priority value, update use counts, repeat.
+ order = order[:0]
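+ // tuples maps a tuple-generating value to its pending Select0/Select1
+ // readers so that they can be emitted right next to the tuple op.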
+ tuples := make(map[ID][]*Value)
+ for priq.Len() > 0 {
+ // Find highest priority schedulable value.
+ // Note that schedule is assembled backwards.
+
+ v := heap.Pop(priq).(*Value)
+
+ // Add it to the schedule.
+ // Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
+ //TODO: maybe remove ReadTuple score above, if it does not help performance
+ switch {
+ case v.Op == OpSelect0:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][0] = v
+ case v.Op == OpSelect1:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][1] = v
+ case v.Type.IsTuple() && tuples[v.ID] != nil:
+ if tuples[v.ID][1] != nil {
+ order = append(order, tuples[v.ID][1])
+ }
+ if tuples[v.ID][0] != nil {
+ order = append(order, tuples[v.ID][0])
+ }
+ delete(tuples, v.ID)
+ fallthrough
+ default:
+ order = append(order, v)
+ }
+
+ // Update use counts of arguments.
+ for _, w := range v.Args {
+ if w.Block != b {
+ continue
+ }
+ uses[w.ID]--
+ if uses[w.ID] == 0 {
+ // All uses scheduled, w is now schedulable.
+ heap.Push(priq, w)
+ }
+ }
+ for _, w := range additionalArgs[v.ID] {
+ uses[w.ID]--
+ if uses[w.ID] == 0 {
+ // All uses scheduled, w is now schedulable.
+ heap.Push(priq, w)
+ }
+ }
+ }
+ if len(order) != len(b.Values) {
+ f.Fatalf("schedule does not include all values in block %s", b)
+ }
+ for i := 0; i < len(b.Values); i++ {
+ b.Values[i] = order[len(b.Values)-1-i]
+ }
+ }
+
+ f.scheduled = true
+}
+
+// storeOrder orders values with respect to stores. That is,
+// if v transitively depends on store s, v is ordered after s,
+// otherwise v is ordered before s.
+// Specifically, values are ordered like
+// store1
+// NilCheck that depends on store1
+// other values that depend on store1
+// store2
+// NilCheck that depends on store2
+// other values that depend on store2
+// ...
+// The order of non-store and non-NilCheck values is undefined
+// (not necessarily dependency order). This should be cheaper
+// than a full scheduling as done above.
+// Note that simple dependency order won't work: there is no
+// dependency between NilChecks and values like IsNonNil.
+// Auxiliary data structures are passed in as arguments, so
+// that they can be allocated in the caller and be reused.
+// This function takes care of resetting them.
+func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value {
+ if len(values) == 0 {
+ return values
+ }
+
+ f := values[0].Block.Func
+
+ // find all stores
+
+ // Members of values that are store values.
+ // A constant bound allows this to be stack-allocated. 64 is
+ // enough to cover almost every storeOrder call.
+ stores := make([]*Value, 0, 64)
+ hasNilCheck := false
+ sset.clear() // sset is the set of stores that are used in other values
+ for _, v := range values {
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ if v.Op == OpInitMem || v.Op == OpPhi {
+ continue
+ }
+ sset.add(v.MemoryArg().ID) // record that v's memory arg is used
+ }
+ if v.Op == OpNilCheck {
+ hasNilCheck = true
+ }
+ }
+ if len(stores) == 0 || !hasNilCheck && f.pass.name == "nilcheckelim" {
+ // there is no store, the order does not matter
+ return values
+ }
+
+ // find last store, which is the one that is not used by other stores
+ var last *Value
+ for _, v := range stores {
+ if !sset.contains(v.ID) {
+ if last != nil {
+ f.Fatalf("two stores live simultaneously: %v and %v", v, last)
+ }
+ last = v
+ }
+ }
+
+ // We assign a store number to each value. Store number is the
+ // index of the latest store that this value transitively depends on.
+ // The i-th store in the current block gets store number 3*i. A nil
+ // check that depends on the i-th store gets store number 3*i+1.
+ // Other values that depend on the i-th store get store number 3*i+2.
+ // Special case: 0 -- unassigned, 1 or 2 -- the latest store it depends on
+ // is in the previous block (or there is no store at all, e.g. the value is a Const).
+ // First we assign the number to all stores by walking back the store chain,
+ // then assign the number to other values in DFS order.
+ count := make([]int32, 3*(len(stores)+1))
+ sset.clear() // reuse sparse set to ensure that a value is pushed to stack only once
+ for n, w := len(stores), last; n > 0; n-- {
+ storeNumber[w.ID] = int32(3 * n)
+ count[3*n]++
+ sset.add(w.ID)
+ if w.Op == OpInitMem || w.Op == OpPhi {
+ if n != 1 {
+ f.Fatalf("store order is wrong: there are stores before %v", w)
+ }
+ break
+ }
+ w = w.MemoryArg()
+ }
+ var stack []*Value
+ for _, v := range values {
+ if sset.contains(v.ID) {
+ // in sset means v is a store, or already pushed to stack, or already assigned a store number
+ continue
+ }
+ stack = append(stack, v)
+ sset.add(v.ID)
+
+ for len(stack) > 0 {
+ w := stack[len(stack)-1]
+ if storeNumber[w.ID] != 0 {
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ if w.Op == OpPhi {
+ // A phi value doesn't depend on stores in the current block.
+ // Handle it early to avoid a dependency cycle.
+ storeNumber[w.ID] = 2
+ count[2]++
+ stack = stack[:len(stack)-1]
+ continue
+ }
+
+ max := int32(0) // latest store dependency
+ argsdone := true
+ for _, a := range w.Args {
+ if a.Block != w.Block {
+ continue
+ }
+ if !sset.contains(a.ID) {
+ stack = append(stack, a)
+ sset.add(a.ID)
+ argsdone = false
+ break
+ }
+ if storeNumber[a.ID]/3 > max {
+ max = storeNumber[a.ID] / 3
+ }
+ }
+ if !argsdone {
+ continue
+ }
+
+ n := 3*max + 2
+ if w.Op == OpNilCheck {
+ n = 3*max + 1
+ }
+ storeNumber[w.ID] = n
+ count[n]++
+ stack = stack[:len(stack)-1]
+ }
+ }
+
+ // convert count to prefix sum of counts: count'[i] = sum_{j<=i} count[j]
+ for i := range count {
+ if i == 0 {
+ continue
+ }
+ count[i] += count[i-1]
+ }
+ if count[len(count)-1] != int32(len(values)) {
+ f.Fatalf("storeOrder: value is missing, total count = %d, values = %v", count[len(count)-1], values)
+ }
+
+ // place values in count-indexed bins, which are in the desired store order
+ order := make([]*Value, len(values))
+ for _, v := range values {
+ s := storeNumber[v.ID]
+ order[count[s-1]] = v
+ count[s-1]++
+ }
+
+ // Order nil checks in source order. We want the first in source order to trigger.
+ // If two are on the same line, we don't really care which happens first.
+ // See issue 18169.
+ if hasNilCheck {
+ start := -1
+ for i, v := range order {
+ if v.Op == OpNilCheck {
+ if start == -1 {
+ start = i
+ }
+ } else {
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:i]))
+ start = -1
+ }
+ }
+ }
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:]))
+ }
+ }
+
+ return order
+}
+
+type bySourcePos []*Value
+
+func (s bySourcePos) Len() int { return len(s) }
+func (s bySourcePos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s bySourcePos) Less(i, j int) bool { return s[i].Pos.Before(s[j].Pos) }
diff --git a/src/cmd/compile/internal/ssa/schedule_test.go b/src/cmd/compile/internal/ssa/schedule_test.go
new file mode 100644
index 0000000..f7177dd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule_test.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestSchedule(t *testing.T) {
+ c := testConfig(t)
+ cases := []fun{
+ c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"),
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem1"),
+ Valu("mem3", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "sum", "mem2"),
+ Valu("l1", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"),
+ Valu("l2", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem2"),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "l1", "l2"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem3"))),
+ }
+ for _, c := range cases {
+ schedule(c.f)
+ if !isSingleLiveMem(c.f) {
+ t.Error("single-live-mem restriction not enforced by schedule for func:")
+ printFunc(c.f)
+ }
+ }
+}
+
+func isSingleLiveMem(f *Func) bool {
+ for _, b := range f.Blocks {
+ var liveMem *Value
+ for _, v := range b.Values {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ if liveMem == nil {
+ liveMem = w
+ continue
+ }
+ if w != liveMem {
+ return false
+ }
+ }
+ }
+ if v.Type.IsMemory() {
+ liveMem = v
+ }
+ }
+ }
+ return true
+}
+
+func TestStoreOrder(t *testing.T) {
+ // In the function below, v2 depends on v3 and v4, v4 depends on v3, and v3 depends on store v5.
+ // storeOrder did not handle this case correctly.
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpAdd64, c.config.Types.Int64, 0, nil, "b", "c"), // v2
+ Valu("b", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), // v3
+ Valu("c", OpNeg64, c.config.Types.Int64, 0, nil, "b"), // v4
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), // v5
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "a", "mem1"),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem2")))
+
+ CheckFunc(fun.f)
+ order := storeOrder(fun.f.Blocks[0].Values, fun.f.newSparseSet(fun.f.NumValues()), make([]int32, fun.f.NumValues()))
+
+ // check that v2, v3, and v4 are sorted after v5
+ var ai, bi, ci, si int
+ for i, v := range order {
+ switch v.ID {
+ case 2:
+ ai = i
+ case 3:
+ bi = i
+ case 4:
+ ci = i
+ case 5:
+ si = i
+ }
+ }
+ if ai < si || bi < si || ci < si {
+ t.Logf("Func: %s", fun.f)
+ t.Errorf("store order is wrong: got %v, want v2 v3 v4 after v5", order)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
new file mode 100644
index 0000000..3876d8d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShiftConstAMD64(t *testing.T) {
+ c := testConfig(t)
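+ // Constant shift amounts should compile to a bare shift instruction,
+ // with no runtime bounds check (CMPQconst) or masking (ANDQconst)
+ // of the shift count.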
+ fun := makeConstShiftFunc(c, 18, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+}
+
+func makeConstShiftFunc(c *Conf, amount int64, op Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("shift", op, typ, 0, nil, "load", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "shift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
+
+func TestShiftToExtensionAMD64(t *testing.T) {
+ c := testConfig(t)
+ // Test that eligible pairs of constant shifts are converted to extensions.
+ // For example:
+ // (uint64(x) << 32) >> 32 -> uint64(uint32(x))
+ ops := map[Op]int{
+ OpAMD64SHLQconst: 0, OpAMD64SHLLconst: 0,
+ OpAMD64SHRQconst: 0, OpAMD64SHRLconst: 0,
+ OpAMD64SARQconst: 0, OpAMD64SARLconst: 0,
+ }
+ tests := [...]struct {
+ amount int64
+ left, right Op
+ typ *types.Type
+ }{
+ // unsigned
+ {56, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {48, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {32, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {24, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {16, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {8, OpLsh16x64, OpRsh16Ux64, c.config.Types.UInt16},
+ // signed
+ {56, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {48, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {32, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {24, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {16, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {8, OpLsh16x64, OpRsh16x64, c.config.Types.Int16},
+ }
+ for _, tc := range tests {
+ fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ)
+ checkOpcodeCounts(t, fun.f, ops)
+ }
+}
+
+// makeShiftExtensionFunc generates a function containing:
+//
+// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
+//
+// This may be equivalent to a sign or zero extension.
+func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("lshift", lshift, typ, 0, nil, "load", "c"),
+ Valu("rshift", rshift, typ, 0, nil, "lshift", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "rshift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
new file mode 100644
index 0000000..4dd86ec
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -0,0 +1,510 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Shortcircuit finds situations where branch directions
+// are always correlated and rewrites the CFG to take
+// advantage of that fact.
+// This optimization is useful for compiling && and || expressions.
+func shortcircuit(f *Func) {
+ // Step 1: Replace a phi arg with a constant if that arg
+ // is the control value of a preceding If block.
+ // b1:
+ // If a goto b2 else b3
+ // b2: <- b1 ...
+ // x = phi(a, ...)
+ //
+ // We can replace the "a" in the phi with the constant true.
+ var ct, cf *Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ if !v.Type.IsBoolean() {
+ continue
+ }
+ for i, a := range v.Args {
+ e := b.Preds[i]
+ p := e.b
+ if p.Kind != BlockIf {
+ continue
+ }
+ if p.Controls[0] != a {
+ continue
+ }
+ if e.i == 0 {
+ if ct == nil {
+ ct = f.ConstBool(f.Config.Types.Bool, true)
+ }
+ v.SetArg(i, ct)
+ } else {
+ if cf == nil {
+ cf = f.ConstBool(f.Config.Types.Bool, false)
+ }
+ v.SetArg(i, cf)
+ }
+ }
+ }
+ }
+
+ // Step 2: Redirect control flow around known branches.
+ // p:
+ // ... goto b ...
+ // b: <- p ...
+ // v = phi(true, ...)
+ // if v goto t else u
+ // We can redirect p to go directly to t instead of b.
+ // (If v is not live after b).
+ fuse(f, fuseTypePlain|fuseTypeShortCircuit)
+}
+
+// shortcircuitBlock checks for a CFG in which an If block
+// has as its control value a Phi that has a ConstBool arg.
+// In some such cases, we can rewrite the CFG into a flatter form.
+//
+// (1) Look for a CFG of the form
+//
+// p other pred(s)
+// \ /
+// b
+// / \
+// t other succ
+//
+// in which b is an If block containing a single phi value with a single use (b's Control),
+// which has a ConstBool arg.
+// p is the predecessor corresponding to the argument slot in which the ConstBool is found.
+// t is the successor corresponding to the value of the ConstBool arg.
+//
+// Rewrite this into
+//
+// p other pred(s)
+// | /
+// | b
+// |/ \
+// t u
+//
+// and remove the appropriate phi arg(s).
+//
+// (2) Look for a CFG of the form
+//
+// p q
+// \ /
+// b
+// / \
+// t u
+//
+// in which b is as described in (1).
+// However, b may also contain other phi values.
+// The CFG will be modified as described in (1).
+// However, in order to handle those other phi values,
+// for each other phi value w, we must be able to eliminate w from b.
+// We can do that through a combination of moving w to a different block
+// and rewriting uses of w to use a different value instead.
+// See shortcircuitPhiPlan for details.
+func shortcircuitBlock(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+ // Look for control values of the form Copy(Not(Copy(Phi(const, ...)))).
+ // Those must be the only values in b, and each must be used only by b.
+ // Track the negations so that we can swap successors as needed later.
+ ctl := b.Controls[0]
+ nval := 1 // the control value
+ var swap int64
+ for ctl.Uses == 1 && ctl.Block == b && (ctl.Op == OpCopy || ctl.Op == OpNot) {
+ if ctl.Op == OpNot {
+ swap = 1 ^ swap
+ }
+ ctl = ctl.Args[0]
+ nval++ // wrapper around control value
+ }
+ if ctl.Op != OpPhi || ctl.Block != b || ctl.Uses != 1 {
+ return false
+ }
+ nOtherPhi := 0
+ for _, w := range b.Values {
+ if w.Op == OpPhi && w != ctl {
+ nOtherPhi++
+ }
+ }
+ if nOtherPhi > 0 && len(b.Preds) != 2 {
+ // We rely on b having exactly two preds in shortcircuitPhiPlan
+ // to reason about the values of phis.
+ return false
+ }
+ if len(b.Values) != nval+nOtherPhi {
+ return false
+ }
+ if nOtherPhi > 0 {
+ // Check for any phi which is the argument of another phi.
+ // These cases are tricky, as substitutions done by replaceUses
+ // are no longer trivial to do in any ordering. See issue 45175.
+ m := make(map[*Value]bool, 1+nOtherPhi)
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ m[v] = true
+ }
+ }
+ for v := range m {
+ for _, a := range v.Args {
+ if a != v && m[a] {
+ return false
+ }
+ }
+ }
+ }
+
+ // Locate index of first const phi arg.
+ cidx := -1
+ for i, a := range ctl.Args {
+ if a.Op == OpConstBool {
+ cidx = i
+ break
+ }
+ }
+ if cidx == -1 {
+ return false
+ }
+
+ // p is the predecessor corresponding to cidx.
+ pe := b.Preds[cidx]
+ p := pe.b
+ pi := pe.i
+
+ // t is the "taken" branch: the successor we always go to when coming in from p.
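+ // For a BlockIf, Succs[0] is the branch taken when the control is true;
+ // a ConstBool has AuxInt 1 for true, and swap undoes any OpNot wrappers
+ // stripped above.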
+ ti := 1 ^ ctl.Args[cidx].AuxInt ^ swap
+ te := b.Succs[ti]
+ t := te.b
+ if p == b || t == b {
+ // This is an infinite loop; we can't remove it. See issue 33903.
+ return false
+ }
+
+ var fixPhi func(*Value, int)
+ if nOtherPhi > 0 {
+ fixPhi = shortcircuitPhiPlan(b, ctl, cidx, ti)
+ if fixPhi == nil {
+ return false
+ }
+ }
+
+ // We're committed. Update CFG and Phis.
+ // If you modify this section, update shortcircuitPhiPlan correspondingly.
+
+ // Remove b's incoming edge from p.
+ b.removePred(cidx)
+ n := len(b.Preds)
+ ctl.Args[cidx].Uses--
+ ctl.Args[cidx] = ctl.Args[n]
+ ctl.Args[n] = nil
+ ctl.Args = ctl.Args[:n]
+
+ // Redirect p's outgoing edge to t.
+ p.Succs[pi] = Edge{t, len(t.Preds)}
+
+ // Fix up t to have one more predecessor.
+ t.Preds = append(t.Preds, Edge{p, pi})
+ for _, v := range t.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.AddArg(v.Args[te.i])
+ }
+
+ if nOtherPhi != 0 {
+ // Adjust all other phis as necessary.
+ // Use a plain for loop instead of range because fixPhi may move phis,
+ // thus modifying b.Values.
+ for i := 0; i < len(b.Values); i++ {
+ phi := b.Values[i]
+ if phi.Uses == 0 || phi == ctl || phi.Op != OpPhi {
+ continue
+ }
+ fixPhi(phi, i)
+ if phi.Block == b {
+ continue
+ }
+ // phi got moved to a different block with v.moveTo.
+ // Adjust phi values in this new block that refer
+ // to phi to refer to the corresponding phi arg instead.
+ // phi used to be evaluated prior to this block,
+ // and now it is evaluated in this block.
+ for _, v := range phi.Block.Values {
+ if v.Op != OpPhi || v == phi {
+ continue
+ }
+ for j, a := range v.Args {
+ if a == phi {
+ v.SetArg(j, phi.Args[j])
+ }
+ }
+ }
+ if phi.Uses != 0 {
+ phielimValue(phi)
+ } else {
+ phi.reset(OpInvalid)
+ }
+ i-- // v.moveTo put a new value at index i; reprocess
+ }
+
+ // We may have left behind some phi values with no uses
+ // but the wrong number of arguments. Eliminate those.
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ }
+ }
+ }
+
+ if len(b.Preds) == 0 {
+ // Block is now dead.
+ b.Kind = BlockInvalid
+ }
+
+ phielimValue(ctl)
+ return true
+}
+
+// shortcircuitPhiPlan returns a function to handle non-ctl phi values in b,
+// where b is as described in shortcircuitBlock.
+// The returned function accepts a value v
+// and the index i of v in v.Block: v.Block.Values[i] == v.
+// If the returned function moves v to a different block, it will use v.moveTo.
+// cidx is the index in ctl of the ConstBool arg.
+// ti is the index in b.Succs of the always taken branch when arriving from p.
+// If shortcircuitPhiPlan returns nil, there is no plan available,
+// and the CFG modifications must not proceed.
+// The returned function assumes that shortcircuitBlock has completed its CFG modifications.
+func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, int) {
+ // t is the "taken" branch: the successor we always go to when coming in from p.
+ t := b.Succs[ti].b
+ // u is the "untaken" branch: the successor we never go to when coming in from p.
+ u := b.Succs[1^ti].b
+
+ // Look for some common CFG structures
+ // in which the outbound paths from b merge,
+ // with no other preds joining them.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the successor blocks.
+
+ if len(t.Preds) == 1 && len(t.Succs) == 1 &&
+ len(u.Preds) == 1 && len(u.Succs) == 1 &&
+ t.Succs[0].b == u.Succs[0].b && len(t.Succs[0].b.Preds) == 2 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ // \ /
+ // m
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ // \ /
+ // m
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ m := t.Succs[0].b
+ return func(v *Value, i int) {
+ // Replace any uses of v in t and u with the value v must have,
+ // given that we have arrived at that block.
+ // Then move v to m and adjust its value accordingly;
+ // this handles all other uses of v.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(m, i)
+ // The phi in m belongs to whichever pred idx corresponds to t.
+ if m.Preds[0].b == t {
+ v.SetArgs2(phi, argQ)
+ } else {
+ v.SetArgs2(argQ, phi)
+ }
+ }
+ }
+
+ if len(t.Preds) == 2 && len(u.Preds) == 1 && len(u.Succs) == 1 && u.Succs[0].b == t {
+ // p q
+ // \ /
+ // b
+ // |\
+ // | u
+ // |/
+ // t
+ //
+ // After the CFG modifications, this will look like
+ //
+ // q
+ // /
+ // b
+ // |\
+ // p | u
+ // \|/
+ // t
+ //
+ // NB: t.Preds is (b or u, b or u, p).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u. Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs3(argQ, argQ, argP)
+ }
+ }
+
+ if len(u.Preds) == 2 && len(t.Preds) == 1 && len(t.Succs) == 1 && t.Succs[0].b == u {
+ // p q
+ // \ /
+ // b
+ // /|
+ // t |
+ // \|
+ // u
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/|
+ // t |
+ // \|
+ // u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs2(argQ, phi)
+ }
+ }
+
+ // Look for some common CFG structures
+ // in which one outbound path from b exits,
+ // with no other preds joining.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the path leading to exit,
+ // and move the phi to the non-exit path.
+
+ if len(t.Preds) == 1 && len(u.Preds) == 1 && len(t.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where t is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ // If there are no uses of v in t, this phi will be unused.
+ // That's OK; it's not worth the cost to prevent that.
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs1(argQ)
+ }
+ }
+
+ if len(u.Preds) == 1 && len(t.Preds) == 1 && len(u.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where u is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u. Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs2(argQ, argP)
+ }
+ }
+
+ // TODO: handle more cases; shortcircuit optimizations turn out to be reasonably high impact
+ return nil
+}
+
+// replaceUses replaces all uses of old in b with new.
+func (b *Block) replaceUses(old, new *Value) {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a == old {
+ v.SetArg(i, new)
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v == old {
+ b.ReplaceControl(i, new)
+ }
+ }
+}
+
+// moveTo moves v to dst, adjusting the appropriate Block.Values slices.
+// The caller is responsible for ensuring that this is safe.
+// i is the index of v in v.Block.Values.
+func (v *Value) moveTo(dst *Block, i int) {
+ if dst.Func.scheduled {
+ v.Fatalf("moveTo after scheduling")
+ }
+ src := v.Block
+ if src.Values[i] != v {
+ v.Fatalf("moveTo bad index %d", v, i)
+ }
+ if src == dst {
+ return
+ }
+ v.Block = dst
+ dst.Values = append(dst.Values, v)
+ last := len(src.Values) - 1
+ src.Values[i] = src.Values[last]
+ src.Values[last] = nil
+ src.Values = src.Values[:last]
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit_test.go b/src/cmd/compile/internal/ssa/shortcircuit_test.go
new file mode 100644
index 0000000..b25eeb4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit_test.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShortCircuit(t *testing.T) {
+ c := testConfig(t)
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("b1")),
+ Bloc("b1",
+ Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "arg1", "arg2"),
+ If("cmp1", "b2", "b3")),
+ Bloc("b2",
+ Valu("cmp2", OpLess64, c.config.Types.Bool, 0, nil, "arg2", "arg3"),
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi2", OpPhi, c.config.Types.Bool, 0, nil, "cmp1", "cmp2"),
+ If("phi2", "b4", "b5")),
+ Bloc("b4",
+ Valu("cmp3", OpLess64, c.config.Types.Bool, 0, nil, "arg3", "arg1"),
+ Goto("b5")),
+ Bloc("b5",
+ Valu("phi3", OpPhi, c.config.Types.Bool, 0, nil, "phi2", "cmp3"),
+ If("phi3", "b6", "b7")),
+ Bloc("b6",
+ Exit("mem")),
+ Bloc("b7",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ shortcircuit(fun.f)
+ CheckFunc(fun.f)
+
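+ // Shortcircuiting should have rewritten the chained comparisons
+ // so that no boolean phis remain.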
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ t.Errorf("phi %s remains", v)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
new file mode 100644
index 0000000..60ada01
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Value{}, 72, 112},
+ {Block{}, 164, 304},
+ {LocalSlot{}, 32, 48},
+ {valState{}, 28, 40},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go
new file mode 100644
index 0000000..a8a8f83
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/softfloat.go
@@ -0,0 +1,79 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "math"
+)
+
+func softfloat(f *Func) {
+ if !f.Config.SoftFloat {
+ return
+ }
+ newInt64 := false
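+ // newInt64 records whether this pass introduced any 64-bit integer
+ // values; on 32-bit targets those must be decomposed afterwards.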
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsFloat() {
+ f.unCache(v)
+ switch v.Op {
+ case OpPhi, OpLoad, OpArg:
+ if v.Type.Size() == 4 {
+ v.Type = f.Config.Types.UInt32
+ } else {
+ v.Type = f.Config.Types.UInt64
+ }
+ case OpConst32F:
+ v.Op = OpConst32
+ v.Type = f.Config.Types.UInt32
+ v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt))))
+ case OpConst64F:
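+ // The AuxInt of a Const64F already holds the value's 64-bit
+ // IEEE 754 bit pattern, so only the Op and Type change.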
+ v.Op = OpConst64
+ v.Type = f.Config.Types.UInt64
+ case OpNeg32F:
+ arg0 := v.Args[0]
+ v.reset(OpXor32)
+ v.Type = f.Config.Types.UInt32
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst32, v.Type)
+ mask.AuxInt = -0x80000000
+ v.AddArg(mask)
+ case OpNeg64F:
+ arg0 := v.Args[0]
+ v.reset(OpXor64)
+ v.Type = f.Config.Types.UInt64
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst64, v.Type)
+ mask.AuxInt = -0x8000000000000000
+ v.AddArg(mask)
+ case OpRound32F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt32
+ case OpRound64F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt64
+ }
+ newInt64 = newInt64 || v.Type.Size() == 8
+ } else if (v.Op == OpStore || v.Op == OpZero || v.Op == OpMove) && v.Aux.(*types.Type).IsFloat() {
+ switch size := v.Aux.(*types.Type).Size(); size {
+ case 4:
+ v.Aux = f.Config.Types.UInt32
+ case 8:
+ v.Aux = f.Config.Types.UInt64
+ default:
+ v.Fatalf("bad float type with size %d", size)
+ }
+ }
+ }
+ }
+
+ if newInt64 && f.Config.RegSize == 4 {
+ // On 32-bit architectures, decompose the Uint64 values introduced in the switch above.
+ decomposeBuiltIn(f)
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues)
+ }
+
+}
diff --git a/src/cmd/compile/internal/ssa/sparsemap.go b/src/cmd/compile/internal/ssa/sparsemap.go
new file mode 100644
index 0000000..f55db54
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsemap.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseEntry struct {
+ key ID
+ val int32
+ aux src.XPos
+}
+
+type sparseMap struct {
+ dense []sparseEntry
+ sparse []int32
+}
+
+// newSparseMap returns a sparseMap that can map
+// integers between 0 and n-1 to int32s.
+func newSparseMap(n int) *sparseMap {
+ return &sparseMap{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseMap) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseMap) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseMap) contains(k ID) bool {
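+ // s.sparse[k] may hold a stale or uninitialized index; the entry only
+ // counts if the dense slot it points at refers back to k.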
+ i := s.sparse[k]
+ return i < int32(len(s.dense)) && s.dense[i].key == k
+}
+
+// get returns the value for key k, or -1 if k does
+// not appear in the map.
+func (s *sparseMap) get(k ID) int32 {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ return s.dense[i].val
+ }
+ return -1
+}
+
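+// set maps key k to value v, overwriting any previous value,
+// and records the source position a alongside the entry.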
+func (s *sparseMap) set(k ID, v int32, a src.XPos) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val = v
+ s.dense[i].aux = a
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, v, a})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+// setBit sets the v'th bit of k's value, where 0 <= v < 32
+func (s *sparseMap) setBit(k ID, v uint) {
+ if v >= 32 {
+ panic("bit index too large.")
+ }
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val |= 1 << v
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, 1 << v, src.NoXPos})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseMap) remove(k ID) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y.key] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+func (s *sparseMap) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseMap) contents() []sparseEntry {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparseset.go b/src/cmd/compile/internal/ssa/sparseset.go
new file mode 100644
index 0000000..395931d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparseset.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseSet struct {
+ dense []ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseSet) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseSet) contains(x ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) addAll(a []ID) {
+ for _, x := range a {
+ s.add(x)
+ }
+}
+
+func (s *sparseSet) addAllValues(a []*Value) {
+ for _, v := range a {
+ s.add(v.ID)
+ }
+}
+
+func (s *sparseSet) remove(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+// pop removes an arbitrary element from the set.
+// The set must be nonempty.
+func (s *sparseSet) pop() ID {
+ x := s.dense[len(s.dense)-1]
+ s.dense = s.dense[:len(s.dense)-1]
+ return x
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseSet) contents() []ID {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
new file mode 100644
index 0000000..1be20b2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -0,0 +1,235 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "strings"
+)
+
+type SparseTreeNode struct {
+ child *Block
+ sibling *Block
+ parent *Block
+
+ // Every block has 6 numbers associated with it:
+ // entry-1, entry, entry+1, exit-1, exit, and exit+1.
+ // entry and exit are conceptually the top of the block (phi functions)
+ // entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs)
+ // entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in)
+ //
+ // This simplifies life if we wish to query information about x
+ // when x is both an input to and output of a block.
+ entry, exit int32
+}
+
+func (s *SparseTreeNode) String() string {
+ return fmt.Sprintf("[%d,%d]", s.entry, s.exit)
+}
+
+func (s *SparseTreeNode) Entry() int32 {
+ return s.entry
+}
+
+func (s *SparseTreeNode) Exit() int32 {
+ return s.exit
+}
+
+const (
+ // When used to look up definitions in a sparse tree,
+ // these adjustments to a block's entry (+adjust) and
+ // exit (-adjust) numbers allow a distinction to be made
+ // between assignments (typically branch-dependent
+ // conditionals) occurring "before" the block (e.g., as inputs
+ // to the block and its phi functions), "within" the block,
+ // and "after" the block.
+ AdjustBefore = -1 // defined before phi
+ AdjustWithin = 0 // defined by phi
+ AdjustAfter = 1 // defined within block
+)
+
+// A SparseTree is a tree of Blocks.
+// It allows rapid ancestor queries,
+// such as whether one block dominates another.
+type SparseTree []SparseTreeNode
+
+// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+func newSparseTree(f *Func, parentOf []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range f.Blocks {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// newSparseOrderedTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+// children will appear in the reverse of their order in reverseOrder
+// in particular, if reverseOrder is a dfs-reversePostOrder, then the root-to-children
+// walk of the tree will yield a pre-order.
+func newSparseOrderedTree(f *Func, parentOf, reverseOrder []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range reverseOrder {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// treestructure provides a string description of the dominator
+// tree and flow structure of block b and all blocks that it
+// dominates.
+func (t SparseTree) treestructure(b *Block) string {
+ return t.treestructure1(b, 0)
+}
+func (t SparseTree) treestructure1(b *Block, i int) string {
+ s := "\n" + strings.Repeat("\t", i) + b.String() + "->["
+ for i, e := range b.Succs {
+ if i > 0 {
+ s += ","
+ }
+ s += e.b.String()
+ }
+ s += "]"
+ if c0 := t[b.ID].child; c0 != nil {
+ s += "("
+ for c := c0; c != nil; c = t[c.ID].sibling {
+ if c != c0 {
+ s += " "
+ }
+ s += t.treestructure1(c, i+1)
+ }
+ s += ")"
+ }
+ return s
+}
+
+// numberBlock assigns entry and exit numbers for b and b's
+// children in an in-order walk from a gappy sequence, where n
+// is the first number not yet assigned or reserved. N should
+// be larger than zero. For each entry and exit number, the
+// values one larger and smaller are reserved to indicate
+// "strictly above" and "strictly below". numberBlock returns
+// the smallest number not yet assigned or reserved (i.e., the
+// exit number of the last block visited, plus two, because
+// last.exit+1 is a reserved value.)
+//
+// examples:
+//
+// single node tree Root, call with n=1
+// entry=2 Root exit=5; returns 7
+//
+// two node tree, Root->Child, call with n=1
+// entry=2 Root exit=11; returns 13
+// entry=5 Child exit=8
+//
+// three node tree, Root->(Left, Right), call with n=1
+// entry=2 Root exit=17; returns 19
+// entry=5 Left exit=8; entry=11 Right exit=14
+//
+// This is the in-order sequence of assigned and reserved numbers
+// for the last example:
+// root left left right right root
+// 1 2e 3 | 4 5e 6 | 7 8x 9 | 10 11e 12 | 13 14x 15 | 16 17x 18
+
+func (t SparseTree) numberBlock(b *Block, n int32) int32 {
+ // reserve n for entry-1, assign n+1 to entry
+ n++
+ t[b.ID].entry = n
+ // reserve n+1 for entry+1, n+2 is next free number
+ n += 2
+ for c := t[b.ID].child; c != nil; c = t[c.ID].sibling {
+ n = t.numberBlock(c, n) // preserves n = next free number
+ }
+ // reserve n for exit-1, assign n+1 to exit
+ n++
+ t[b.ID].exit = n
+ // reserve n+1 for exit+1, n+2 is next free number, returned.
+ return n + 2
+}
+
+// Sibling returns a sibling of x in the dominator tree (i.e.,
+// a node with the same immediate dominator) or nil if there
+// are no remaining siblings in the arbitrary but repeatable
+// order chosen. Because the Child-Sibling order is used
+// to assign entry and exit numbers in the treewalk, those
+// numbers are also consistent with this order (i.e.,
+// Sibling(x) has entry number larger than x's exit number).
+func (t SparseTree) Sibling(x *Block) *Block {
+ return t[x.ID].sibling
+}
+
+// Child returns a child of x in the dominator tree, or
+// nil if there are none. The choice of first child is
+// arbitrary but repeatable.
+func (t SparseTree) Child(x *Block) *Block {
+ return t[x.ID].child
+}
+
+// IsAncestorEq reports whether x is an ancestor of or equal to y.
+func (t SparseTree) IsAncestorEq(x, y *Block) bool {
+ if x == y {
+ return true
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry <= yy.entry && yy.exit <= xx.exit
+}
+
+// isAncestor reports whether x is a strict ancestor of y.
+func (t SparseTree) isAncestor(x, y *Block) bool {
+ if x == y {
+ return false
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry < yy.entry && yy.exit < xx.exit
+}
+
+// domorder returns a value for dominator-oriented sorting.
+// Block domination does not provide a total ordering,
+// but domorder has two useful properties.
+// (1) If domorder(x) > domorder(y) then x does not dominate y.
+// (2) If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
+// then x does not dominate z.
+// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
+// Property (2) allows searches for dominated blocks to exit early.
+func (t SparseTree) domorder(x *Block) int32 {
+ // Here is an argument that entry(x) provides the properties documented above.
+ //
+ // Entry and exit values are assigned in a depth-first dominator tree walk.
+ // For all blocks x and y, one of the following holds:
+ //
+ // (x-dom-y) x dominates y => entry(x) < entry(y) < exit(y) < exit(x)
+ // (y-dom-x) y dominates x => entry(y) < entry(x) < exit(x) < exit(y)
+ // (x-then-y) neither x nor y dominates the other and x walked before y => entry(x) < exit(x) < entry(y) < exit(y)
+ // (y-then-x) neither x nor y dominates the other and y walked before x => entry(y) < exit(y) < entry(x) < exit(x)
+ //
+ // entry(x) > entry(y) eliminates case x-dom-y. This provides property (1) above.
+ //
+ // For property (2), assume entry(x) < entry(y) and entry(y) < entry(z) and x does not dominate y.
+ // entry(x) < entry(y) allows cases x-dom-y and x-then-y.
+ // But by supposition, x does not dominate y. So we have x-then-y.
+ //
+ // For contradiction, assume x dominates z.
+ // Then entry(x) < entry(z) < exit(z) < exit(x).
+ // But we know x-then-y, so entry(x) < exit(x) < entry(y) < exit(y).
+ // Combining those, entry(x) < entry(z) < exit(z) < exit(x) < entry(y) < exit(y).
+ // By supposition, entry(y) < entry(z), which allows cases y-dom-z and y-then-z.
+ // y-dom-z requires entry(y) < entry(z), but we have entry(z) < entry(y).
+ // y-then-z requires exit(y) < entry(z), but we have entry(z) < exit(y).
+ // We have a contradiction, so x does not dominate z, as required.
+ return t[x.ID].entry
+}
diff --git a/src/cmd/compile/internal/ssa/sparsetreemap.go b/src/cmd/compile/internal/ssa/sparsetreemap.go
new file mode 100644
index 0000000..d264675
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsetreemap.go
@@ -0,0 +1,189 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "fmt"
+
+// A SparseTreeMap encodes a subset of nodes within a tree
+// used for sparse-ancestor queries.
+//
+// Combined with a SparseTreeHelper, this supports an Insert
+// to add a tree node to the set and a Find operation to locate
+// the nearest tree ancestor of a given node such that the
+// ancestor is also in the set.
+//
+// Given a set of blocks {B1, B2, B3} within the dominator tree, established
+// by stm.Insert()ing B1, B2, B3, etc., a query at block B
+// (performed with stm.Find(stm, B, adjust, helper))
+// will return the member of the set that is the nearest strict
+// ancestor of B within the dominator tree, or nil if none exists.
+// The expected complexity of this operation is the log of the size
+// of the set, given certain assumptions about sparsity (the log complexity
+// could be guaranteed with additional data structures whose constant-
+// factor overhead has not yet been justified.)
+//
+// The adjust parameter allows positioning of the insertion
+// and lookup points within a block -- one of
+// AdjustBefore, AdjustWithin, AdjustAfter,
+// where lookups at AdjustWithin can find insertions at
+// AdjustBefore in the same block, and lookups at AdjustAfter
+// can find insertions at either AdjustBefore or AdjustWithin
+// in the same block. (Note that this assumes a gappy numbering
+// such that an entry number or exit number is separated from its
+// nearest neighbor by at least 3).
+//
+// The Sparse Tree lookup algorithm is described by
+// Paul F. Dietz. Maintaining order in a linked list. In
+// Proceedings of the Fourteenth Annual ACM Symposium on
+// Theory of Computing, pages 122–127, May 1982.
+// and by
+// Ben Wegbreit. Faster retrieval from context trees.
+// Communications of the ACM, 19(9):526–529, September 1976.
+type SparseTreeMap RBTint32
+
+// A SparseTreeHelper contains indexing and allocation data
+// structures common to a collection of SparseTreeMaps, as well
+// as exposing some useful control-flow-related data to other
+// packages, such as gc.
+type SparseTreeHelper struct {
+ Sdom []SparseTreeNode // indexed by block.ID
+ Po []*Block // exported data; the blocks, in a post-order
+ Dom []*Block // exported data; the dominator of this block.
+ Ponums []int32 // exported data; Po[Ponums[b.ID]] == b; the index of b in Po
+}
+
+// NewSparseTreeHelper returns a SparseTreeHelper for use
+// in the gc package, for example in phi-function placement.
+func NewSparseTreeHelper(f *Func) *SparseTreeHelper {
+ dom := f.Idom()
+ ponums := make([]int32, f.NumBlocks())
+ po := postorderWithNumbering(f, ponums)
+ return makeSparseTreeHelper(newSparseTree(f, dom), dom, po, ponums)
+}
+
+func (h *SparseTreeHelper) NewTree() *SparseTreeMap {
+ return &SparseTreeMap{}
+}
+
+func makeSparseTreeHelper(sdom SparseTree, dom, po []*Block, ponums []int32) *SparseTreeHelper {
+ helper := &SparseTreeHelper{Sdom: []SparseTreeNode(sdom),
+ Dom: dom,
+ Po: po,
+ Ponums: ponums,
+ }
+ return helper
+}
+
+// A sparseTreeMapEntry contains the data stored in a binary search
+// data structure indexed by (dominator tree walk) entry and exit numbers.
+// Each entry is added twice, once keyed by entry-1/entry/entry+1 and
+// once keyed by exit+1/exit/exit-1.
+//
+// Within a sparse tree, the two entries added bracket all their descendant
+// entries within the tree; the first insertion is keyed by entry number,
+// which comes before all the entry and exit numbers of descendants, and
+// the second insertion is keyed by exit number, which comes after all the
+// entry and exit numbers of the descendants.
+type sparseTreeMapEntry struct {
+ index *SparseTreeNode // references the entry and exit numbers for a block in the sparse tree
+ block *Block // TODO: store this in a separate index.
+ data interface{}
+ sparseParent *sparseTreeMapEntry // references the nearest ancestor of this block in the sparse tree.
+ adjust int32 // at what adjustment was this node entered into the sparse tree? The same block may be entered more than once, but at different adjustments.
+}
+
+// Insert creates a definition within b with data x.
+// adjust indicates where in the block should be inserted:
+// AdjustBefore means defined at a phi function (visible Within or After in the same block)
+// AdjustWithin means defined within the block (visible After in the same block)
+// AdjustAfter means after the block (visible within child blocks)
+func (m *SparseTreeMap) Insert(b *Block, adjust int32, x interface{}, helper *SparseTreeHelper) {
+ rbtree := (*RBTint32)(m)
+ blockIndex := &helper.Sdom[b.ID]
+ if blockIndex.entry == 0 {
+ // assert unreachable
+ return
+ }
+ // sp will be the sparse parent in this sparse tree (nearest ancestor in the larger tree that is also in this sparse tree)
+ sp := m.findEntry(b, adjust, helper)
+ entry := &sparseTreeMapEntry{index: blockIndex, block: b, data: x, sparseParent: sp, adjust: adjust}
+
+ right := blockIndex.exit - adjust
+ _ = rbtree.Insert(right, entry)
+
+ left := blockIndex.entry + adjust
+ _ = rbtree.Insert(left, entry)
+
+ // This newly inserted block may now be the sparse parent of some existing nodes (the new sparse children of this block)
+ // Iterate over nodes bracketed by this new node to correct their parent, but not over the proper sparse descendants of those nodes.
+ _, d := rbtree.Lub(left) // Lub (not EQ) of left is either right or a sparse child
+ for tme := d.(*sparseTreeMapEntry); tme != entry; tme = d.(*sparseTreeMapEntry) {
+ tme.sparseParent = entry
+ // all descendants of tme are unchanged;
+ // next sparse sibling (or right-bracketing sparse parent == entry) is first node after tme.index.exit - tme.adjust
+ _, d = rbtree.Lub(tme.index.exit - tme.adjust)
+ }
+}
+
+// Find returns the definition visible from block b, or nil if none can be found.
+// Adjust indicates where the block should be searched.
+// AdjustBefore searches before the phi functions of b.
+// AdjustWithin searches starting at the phi functions of b.
+// AdjustAfter searches starting at the exit from the block, including normal within-block definitions.
+//
+// Note that Finds are properly nested with Inserts:
+// m.Insert(b, a) followed by m.Find(b, a) will not return the result of the insert,
+// but m.Insert(b, AdjustBefore) followed by m.Find(b, AdjustWithin) will.
+//
+// Another way to think of this is that Find searches for inputs, Insert defines outputs.
+func (m *SparseTreeMap) Find(b *Block, adjust int32, helper *SparseTreeHelper) interface{} {
+ v := m.findEntry(b, adjust, helper)
+ if v == nil {
+ return nil
+ }
+ return v.data
+}
+
+func (m *SparseTreeMap) findEntry(b *Block, adjust int32, helper *SparseTreeHelper) *sparseTreeMapEntry {
+ rbtree := (*RBTint32)(m)
+ if rbtree == nil {
+ return nil
+ }
+ blockIndex := &helper.Sdom[b.ID]
+
+ // The Glb (not EQ) of this probe is either the entry-indexed end of a sparse parent
+ // or the exit-indexed end of a sparse sibling
+ _, v := rbtree.Glb(blockIndex.entry + adjust)
+
+ if v == nil {
+ return nil
+ }
+
+ otherEntry := v.(*sparseTreeMapEntry)
+ if otherEntry.index.exit >= blockIndex.exit { // otherEntry exit after blockIndex exit; therefore, brackets
+ return otherEntry
+ }
+ // otherEntry is a sparse Sibling, and shares the same sparse parent (nearest ancestor within larger tree)
+ sp := otherEntry.sparseParent
+ if sp != nil {
+ if sp.index.exit < blockIndex.exit { // no ancestor found
+ return nil
+ }
+ return sp
+ }
+ return nil
+}
+
+func (m *SparseTreeMap) String() string {
+ tree := (*RBTint32)(m)
+ return tree.String()
+}
+
+func (e *sparseTreeMapEntry) String() string {
+ if e == nil {
+ return "nil"
+ }
+ return fmt.Sprintf("(index=%v, block=%v, data=%v)->%v", e.index, e.block, e.data, e.sparseParent)
+}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
new file mode 100644
index 0000000..406a3c3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -0,0 +1,420 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: live at start of block instead?
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+type stackAllocState struct {
+ f *Func
+
+ // live is the output of stackalloc.
+ // live[b.id] = live values at the end of block b.
+ live [][]ID
+
+ // The following slices are reused across multiple users
+ // of stackAllocState.
+ values []stackValState
+ interfere [][]ID // interfere[v.id] = values that interfere with v.
+ names []LocalSlot
+ slots []int
+ used []bool
+
+ nArgSlot, // Number of Values sourced to arg slot
+ nNotNeed, // Number of Values not needing a stack slot
+ nNamedSlot, // Number of Values using a named stack slot
+ nReuse, // Number of values reusing a stack slot
+ nAuto, // Number of autos allocated for stack slots.
+ nSelfInterfere int32 // Number of self-interferences
+}
+
+func newStackAllocState(f *Func) *stackAllocState {
+ s := f.Cache.stackAllocState
+ if s == nil {
+ return new(stackAllocState)
+ }
+ if s.f != nil {
+ f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
+ }
+ return s
+}
+
+func putStackAllocState(s *stackAllocState) {
+ for i := range s.values {
+ s.values[i] = stackValState{}
+ }
+ for i := range s.interfere {
+ s.interfere[i] = nil
+ }
+ for i := range s.names {
+ s.names[i] = LocalSlot{}
+ }
+ for i := range s.slots {
+ s.slots[i] = 0
+ }
+ for i := range s.used {
+ s.used[i] = false
+ }
+ s.f.Cache.stackAllocState = s
+ s.f = nil
+ s.live = nil
+ s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
+}
+
+type stackValState struct {
+ typ *types.Type
+ spill *Value
+ needSlot bool
+ isArg bool
+}
+
+// stackalloc allocates storage in the stack frame for
+// all Values that did not get a register.
+// Returns a map from block ID to the stack values live at the end of that block.
+func stackalloc(f *Func, spillLive [][]ID) [][]ID {
+ if f.pass.debug > stackDebug {
+ fmt.Println("before stackalloc")
+ fmt.Println(f.String())
+ }
+ s := newStackAllocState(f)
+ s.init(f, spillLive)
+ defer putStackAllocState(s)
+
+ s.stackalloc()
+ if f.pass.stats > 0 {
+ f.LogStat("stack_alloc_stats",
+ s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
+ s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
+ s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
+ }
+
+ return s.live
+}
+
+func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
+ s.f = f
+
+ // Initialize value information.
+ if n := f.NumValues(); cap(s.values) >= n {
+ s.values = s.values[:n]
+ } else {
+ s.values = make([]stackValState, n)
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ s.values[v.ID].typ = v.Type
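+ // A value needs a slot if it produces real data (not memory, void,
+ // or flags), was not assigned a home by regalloc, cannot be cheaply
+ // rematerialized, and does not live on the wasm operand stack.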
+ s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack
+ s.values[v.ID].isArg = v.Op == OpArg
+ if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
+ fmt.Printf("%s needs a stack slot\n", v)
+ }
+ if v.Op == OpStoreReg {
+ s.values[v.Args[0].ID].spill = v
+ }
+ }
+ }
+
+ // Compute liveness info for values needing a slot.
+ s.computeLive(spillLive)
+
+ // Build interference graph among values needing a slot.
+ s.buildInterferenceGraph()
+}
+
+func (s *stackAllocState) stackalloc() {
+ f := s.f
+
+ // Build map from values to their names, if any.
+ // A value may be associated with more than one name (e.g. after
+ // the assignment i=j). This step picks one name per value arbitrarily.
+ if n := f.NumValues(); cap(s.names) >= n {
+ s.names = s.names[:n]
+ } else {
+ s.names = make([]LocalSlot, n)
+ }
+ names := s.names
+ for _, name := range f.Names {
+ // Note: not "range f.NamedValues" above, because
+ // that would be nondeterministic.
+ for _, v := range f.NamedValues[name] {
+ names[v.ID] = name
+ }
+ }
+
+ // Allocate args to their assigned locations.
+ for _, v := range f.Entry.Values {
+ if v.Op != OpArg {
+ continue
+ }
+ if v.Aux == nil {
+ f.Fatalf("%s has nil Aux\n", v.LongString())
+ }
+ loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt}
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ }
+
+ // For each type, we keep track of all the stack slots we
+ // have allocated for that type.
+ // TODO: share slots among equivalent types. We would need to
+ // only share among types with the same GC signature. See the
+ // type.Equal calls below for where this matters.
+ locations := map[*types.Type][]LocalSlot{}
+
+ // Each time we assign a stack slot to a value v, we remember
+ // the slot we used via an index into locations[v.Type].
+ slots := s.slots
+ if n := f.NumValues(); cap(slots) >= n {
+ slots = slots[:n]
+ } else {
+ slots = make([]int, n)
+ s.slots = slots
+ }
+ for i := range slots {
+ slots[i] = -1
+ }
+
+ // Pick a stack slot for each value needing one.
+ var used []bool
+ if n := f.NumValues(); cap(s.used) >= n {
+ used = s.used[:n]
+ } else {
+ used = make([]bool, n)
+ s.used = used
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !s.values[v.ID].needSlot {
+ s.nNotNeed++
+ continue
+ }
+ if v.Op == OpArg {
+ s.nArgSlot++
+ continue // already picked
+ }
+
+ // If this is a named value, try to use the name as
+ // the spill location.
+ var name LocalSlot
+ if v.Op == OpStoreReg {
+ name = names[v.Args[0].ID]
+ } else {
+ name = names[v.ID]
+ }
+ if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
+ for _, id := range s.interfere[v.ID] {
+ h := f.getHome(id)
+ if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
+ // A variable can interfere with itself.
+ // It is rare, but it can happen.
+ s.nSelfInterfere++
+ goto noname
+ }
+ }
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, name)
+ }
+ s.nNamedSlot++
+ f.setHome(v, name)
+ continue
+ }
+
+ noname:
+ // Set of stack slots we could reuse.
+ locs := locations[v.Type]
+ // Mark all positions in locs used by interfering values.
+ for i := 0; i < len(locs); i++ {
+ used[i] = false
+ }
+ for _, xid := range s.interfere[v.ID] {
+ slot := slots[xid]
+ if slot >= 0 {
+ used[slot] = true
+ }
+ }
+ // Find an unused stack slot.
+ var i int
+ for i = 0; i < len(locs); i++ {
+ if !used[i] {
+ s.nReuse++
+ break
+ }
+ }
+ // If there is no unused stack slot, allocate a new one.
+ if i == len(locs) {
+ s.nAuto++
+ locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
+ locations[v.Type] = locs
+ }
+ // Use the stack variable at that index for v.
+ loc := locs[i]
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ slots[v.ID] = i
+ }
+ }
+}
+
+// computeLive computes a map from block ID to a list of
+// stack-slot-needing value IDs live at the end of that block.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *stackAllocState) computeLive(spillLive [][]ID) {
+ s.live = make([][]ID, s.f.NumBlocks())
+ var phis []*Value
+ live := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(live)
+ t := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(t)
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ po := s.f.postorder()
+ for {
+ changed := false
+ for _, b := range po {
+ // Start with known live values at the end of the block
+ live.clear()
+ live.addAll(s.live[b.ID])
+
+ // Propagate backwards to the start of the block
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // Save phi for later.
+ // Note: its args might need a stack slot even though
+ // the phi itself doesn't. So don't use needSlot.
+ if !v.Type.IsMemory() && !v.Type.IsVoid() {
+ phis = append(phis, v)
+ }
+ continue
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ }
+
+ // for each predecessor of b, expand its list of live-at-end values
+ // invariant: live contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ t.clear()
+ t.addAll(s.live[p.ID])
+ t.addAll(live.contents())
+ t.addAll(spillLive[p.ID])
+ for _, v := range phis {
+ a := v.Args[i]
+ if s.values[a.ID].needSlot {
+ t.add(a.ID)
+ }
+ if spill := s.values[a.ID].spill; spill != nil {
+ //TODO: remove? Subsumed by SpillUse?
+ t.add(spill.ID)
+ }
+ }
+ if t.size() == len(s.live[p.ID]) {
+ continue
+ }
+ // grow p's live set
+ s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if s.f.pass.debug > stackDebug {
+ for _, b := range s.f.Blocks {
+ fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
+ }
+ }
+}
+
+func (f *Func) getHome(vid ID) Location {
+ if int(vid) >= len(f.RegAlloc) {
+ return nil
+ }
+ return f.RegAlloc[vid]
+}
+
+func (f *Func) setHome(v *Value, loc Location) {
+ for v.ID >= ID(len(f.RegAlloc)) {
+ f.RegAlloc = append(f.RegAlloc, nil)
+ }
+ f.RegAlloc[v.ID] = loc
+}
+
+func (s *stackAllocState) buildInterferenceGraph() {
+ f := s.f
+ if n := f.NumValues(); cap(s.interfere) >= n {
+ s.interfere = s.interfere[:n]
+ } else {
+ s.interfere = make([][]ID, n)
+ }
+ live := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(live)
+ for _, b := range f.Blocks {
+ // Propagate liveness backwards to the start of the block.
+ // Two values interfere if one is defined while the other is live.
+ live.clear()
+ live.addAll(s.live[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if s.values[v.ID].needSlot {
+ live.remove(v.ID)
+ for _, id := range live.contents() {
+ // Note: args can have different types and still interfere
+ // (with each other or with other values). See issue 23522.
+ if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || v.Op == OpArg || s.values[id].isArg {
+ s.interfere[v.ID] = append(s.interfere[v.ID], id)
+ s.interfere[id] = append(s.interfere[id], v.ID)
+ }
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ if v.Op == OpArg && s.values[v.ID].needSlot {
+ // OpArg is an input argument which is pre-spilled.
+ // We add back v.ID here because we want this value
+ // to appear live even before this point. Being live
+ // all the way to the start of the entry block prevents other
+ // values from being allocated to the same slot and clobbering
+ // the input value before we have a chance to load it.
+ live.add(v.ID)
+ }
+ }
+ }
+ if f.pass.debug > stackDebug {
+ for vid, i := range s.interfere {
+ if len(i) > 0 {
+ fmt.Printf("v%d interferes with", vid)
+ for _, x := range i {
+ fmt.Printf(" v%d", x)
+ }
+ fmt.Println()
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/stackframe.go b/src/cmd/compile/internal/ssa/stackframe.go
new file mode 100644
index 0000000..08be62a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stackframe.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// stackframe calls back into the frontend to assign frame offsets.
+func stackframe(f *Func) {
+ f.fe.AllocFrame(f)
+}
diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go
new file mode 100644
index 0000000..f5ff3a5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stmtlines_test.go
@@ -0,0 +1,132 @@
+package ssa_test
+
+import (
+ cmddwarf "cmd/internal/dwarf"
+ "debug/dwarf"
+ "debug/elf"
+ "debug/macho"
+ "debug/pe"
+ "fmt"
+ "internal/testenv"
+ "internal/xcoff"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "testing"
+)
+
+func open(path string) (*dwarf.Data, error) {
+ if fh, err := elf.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := pe.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := macho.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := xcoff.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ return nil, fmt.Errorf("unrecognized executable format")
+}
+
+func must(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type Line struct {
+ File string
+ Line int
+}
+
+func TestStmtLines(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ if runtime.GOOS == "aix" {
+ extld := os.Getenv("CC")
+ if extld == "" {
+ extld = "gcc"
+ }
+ enabled, err := cmddwarf.IsDWARFEnabledOnAIXLd(extld)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !enabled {
+ t.Skip("skipping on aix: no DWARF with ld version < 7.2.2 ")
+ }
+ }
+
+ lines := map[Line]bool{}
+ dw, err := open(testenv.GoToolPath(t))
+ must(err)
+ rdr := dw.Reader()
+ rdr.Seek(0)
+ for {
+ e, err := rdr.Next()
+ must(err)
+ if e == nil {
+ break
+ }
+ if e.Tag != dwarf.TagCompileUnit {
+ continue
+ }
+ pkgname, _ := e.Val(dwarf.AttrName).(string)
+ if pkgname == "runtime" {
+ continue
+ }
+ if e.Val(dwarf.AttrStmtList) == nil {
+ continue
+ }
+ lrdr, err := dw.LineReader(e)
+ must(err)
+
+ var le dwarf.LineEntry
+
+ for {
+ err := lrdr.Next(&le)
+ if err == io.EOF {
+ break
+ }
+ must(err)
+ fl := Line{le.File.Name, le.Line}
+ lines[fl] = lines[fl] || le.IsStmt
+ }
+ }
+
+ nonStmtLines := []Line{}
+ for line, isstmt := range lines {
+ if !isstmt {
+ nonStmtLines = append(nonStmtLines, line)
+ }
+ }
+
+ if runtime.GOARCH == "amd64" {
+ if len(nonStmtLines)*100 > len(lines) { // > 99% obtained on amd64, no backsliding
+ t.Errorf("Saw too many (amd64, > 1%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines))
+ }
+ } else if len(nonStmtLines)*100 > 2*len(lines) { // expect 98% elsewhere.
+ t.Errorf("Saw too many (not amd64, > 2%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines))
+ }
+ if testing.Verbose() {
+ sort.Slice(nonStmtLines, func(i, j int) bool {
+ if nonStmtLines[i].File != nonStmtLines[j].File {
+ return nonStmtLines[i].File < nonStmtLines[j].File
+ }
+ return nonStmtLines[i].Line < nonStmtLines[j].Line
+ })
+ for _, l := range nonStmtLines {
+ t.Logf("%s:%d has no DWARF is_stmt mark\n", l.File, l.Line)
+ }
+ }
+ t.Logf("total=%d, nostmt=%d\n", len(lines), len(nonStmtLines))
+}
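
The thresholds in TestStmtLines are written as integer comparisons: len(nonStmtLines)*100 > len(lines) is the exact, float-free form of "more than 1% of lines lack an is_stmt mark", and the 2*len(lines) variant is the 2% bound applied off amd64. A minimal sketch of that arithmetic, with invented numbers purely for illustration:

package main

import "fmt"

// tooManyNonStmt reports whether nonStmt/total exceeds maxPercent percent,
// using the same integer form as the test to avoid floating point.
func tooManyNonStmt(nonStmt, total, maxPercent int) bool {
	return nonStmt*100 > maxPercent*total
}

func main() {
	fmt.Println(tooManyNonStmt(120, 10000, 1)) // 1.2%  > 1% -> true
	fmt.Println(tooManyNonStmt(95, 10000, 1))  // 0.95% <= 1% -> false
	fmt.Println(tooManyNonStmt(150, 10000, 2)) // 1.5%  <= 2% -> false
}
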
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
new file mode 100644
index 0000000..a0404e4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
@@ -0,0 +1,99 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
new file mode 100644
index 0000000..2be83ce
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
@@ -0,0 +1,94 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
new file mode 100644
index 0000000..72df60c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
@@ -0,0 +1,123 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l.begin.x = 1
+l.end.y = 4
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+hist = {array = <A>, len = 7, cap = 7}
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 3
+i = 1
+t = 3
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 6
+i = 2
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 8
+i = 4
+t = 17
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 9
+i = 5
+t = 22
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
new file mode 100644
index 0000000..d3a34ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
@@ -0,0 +1,143 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l = {begin = {x = 1, y = 2}, end = {x = 3, y = 4}}
+dx = <Optimized out, as expected>
+dy = <Optimized out, as expected>
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+dx = 2
+dy = <Optimized out, as expected>
+61: sink = dx + dy //gdb-opt=(dx,dy)
+dx = 2
+dy = 2
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+dx = 2
+dy = <Optimized out, as expected>
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+scanner = (bufio.Scanner *) <A>
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 0
+t = 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 0
+t = 0
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 3
+t = 3
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 6
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 2
+n = 6
+t = 9
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 1
+n = 8
+t = 17
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 9
+t = 22
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.go b/src/cmd/compile/internal/ssa/testdata/hist.go
new file mode 100644
index 0000000..f8fa6e6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.go
@@ -0,0 +1,106 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is the input program for an end-to-end test of the DWARF produced
+// by the compiler. It is compiled with various flags, then the resulting
+// binary is "debugged" under the control of a harness. Because the compile+debug
+// step is time-consuming, the tests for different bugs are all accumulated here
+// so that their cost is only the time to "n" through the additional code.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type point struct {
+ x, y int
+}
+
+type line struct {
+ begin, end point
+}
+
+var zero int
+var sink int
+
+//go:noinline
+func tinycall() {
+}
+
+func ensure(n int, sl []int) []int {
+ for len(sl) <= n {
+ sl = append(sl, 0)
+ }
+ return sl
+}
+
+var cannedInput string = `1
+1
+1
+2
+2
+2
+4
+4
+5
+`
+
+func test() {
+ // For #19868
+ l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+ tinycall() // this forces l etc to stack
+ dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+ dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+ sink = dx + dy //gdb-opt=(dx,dy)
+ // For #21098
+ hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+ var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+ if len(os.Args) > 1 {
+ var err error
+ reader, err = os.Open(os.Args[1])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "There was an error opening %s: %v\n", os.Args[1], err)
+ return
+ }
+ }
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() { //gdb-opt=(scanner/A)
+ s := scanner.Text()
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+ fmt.Fprintf(os.Stderr, "There was an error: %v\n", err)
+ return
+ }
+ hist = ensure(int(i), hist)
+ hist[int(i)]++
+ }
+ t := 0
+ n := 0
+ for i, a := range hist {
+ if a == 0 { //gdb-opt=(a,n,t)
+ continue
+ }
+ t += i * a
+ n += a
+ fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+ }
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
new file mode 100644
index 0000000..a00934b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
@@ -0,0 +1,11 @@
+ ./testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
new file mode 100644
index 0000000..70dfa07
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
@@ -0,0 +1,11 @@
+ src/cmd/compile/internal/ssa/testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.go b/src/cmd/compile/internal/ssa/testdata/i22558.go
new file mode 100644
index 0000000..8aea76c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type big struct {
+ pile [768]int8
+}
+
+type thing struct {
+ name string
+ next *thing
+ self *thing
+ stuff []big
+}
+
+func test(t *thing, u *thing) {
+ if t.next != nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", t.name)
+ u.self = u
+ t.self = t
+ t.next = u
+ for _, p := range t.stuff {
+ if isFoo(t, p) {
+ return
+ }
+ }
+}
+
+//go:noinline
+func isFoo(t *thing, b big) bool {
+ return true
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ t := &thing{name: "t", self: nil, next: nil, stuff: make([]big, 1)}
+ u := thing{name: "u", self: t, next: t, stuff: make([]big, 1)}
+ test(t, &u)
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
new file mode 100644
index 0000000..18a5ff9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
@@ -0,0 +1,7 @@
+ ./testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
new file mode 100644
index 0000000..46285e2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
@@ -0,0 +1,7 @@
+ src/cmd/compile/internal/ssa/testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.go b/src/cmd/compile/internal/ssa/testdata/i22600.go
new file mode 100644
index 0000000..27f0d3d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.go
@@ -0,0 +1,27 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func test() {
+ pwd, err := os.Getwd()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ fmt.Println(pwd)
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
new file mode 100644
index 0000000..0b9f06f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
@@ -0,0 +1,12 @@
+ ./testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
new file mode 100644
index 0000000..d465ad1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
@@ -0,0 +1,4 @@
+ src/cmd/compile/internal/ssa/testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.go b/src/cmd/compile/internal/ssa/testdata/infloop.go
new file mode 100644
index 0000000..cdb374f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.go
@@ -0,0 +1,16 @@
+package main
+
+var sink int
+
+//go:noinline
+func test() {
+ // This is for #30167, incorrect line numbers in an infinite loop
+ go func() {}()
+
+ for {
+ }
+}
+
+func main() {
+ test()
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
new file mode 100644
index 0000000..f182ff4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
@@ -0,0 +1,56 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
new file mode 100644
index 0000000..b5e41aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
@@ -0,0 +1,46 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
new file mode 100644
index 0000000..6eb4903
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
@@ -0,0 +1,64 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
new file mode 100644
index 0000000..5a186b5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
@@ -0,0 +1,55 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.go b/src/cmd/compile/internal/ssa/testdata/scopes.go
new file mode 100644
index 0000000..e93d699
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.go
@@ -0,0 +1,107 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "time"
+)
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+//go:noinline
+func id(x int) int {
+ return x
+}
+
+func test() {
+ x := id(0)
+ y := id(0)
+ fmt.Println(x)
+ for i := x; i < 3; i++ {
+ x := i * i
+ y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ }
+ y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ fmt.Println(x, y)
+
+ for x := 0; x <= 1; x++ { // From delve scopetest.go
+ a := y
+ f1(a)
+ {
+ b := 0
+ f2(b)
+ if gretbool() {
+ c := 0
+ f3(c)
+ } else {
+ c := 1.1
+ f4(int(c))
+ }
+ f5(b)
+ }
+ f6(a)
+ }
+
+ { // From delve testnextprog.go
+ var (
+ j = id(1)
+ f = id(2)
+ )
+ for i := 0; i <= 5; i++ {
+ j += j * (j ^ 3) / 100
+ if i == f {
+ fmt.Println("foo")
+ break
+ }
+ sleepytime()
+ }
+ helloworld()
+ }
+}
+
+func sleepytime() {
+ time.Sleep(5 * time.Millisecond)
+}
+
+func helloworld() {
+ fmt.Println("Hello, World!")
+}
+
+//go:noinline
+func f1(x int) {}
+
+//go:noinline
+func f2(x int) {}
+
+//go:noinline
+func f3(x int) {}
+
+//go:noinline
+func f4(x int) {}
+
+//go:noinline
+func f5(x int) {}
+
+//go:noinline
+func f6(x int) {}
+
+var boolvar = true
+
+func gretbool() bool {
+ x := boolvar
+ boolvar = !boolvar
+ return x
+}
+
+var sink string
+
+//go:noinline
+func growstack() {
+ sink = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go
new file mode 100644
index 0000000..5dfc453
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tighten.go
@@ -0,0 +1,164 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// tighten moves Values closer to the Blocks in which they are used.
+// This can reduce the amount of register spilling required,
+// if it doesn't also create more live values.
+// A Value can be moved to any block that
+// dominates all blocks in which it is used.
+func tighten(f *Func) {
+ canMove := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.isLoweredGetClosurePtr() {
+ // Must stay in the entry block.
+ continue
+ }
+ switch v.Op {
+ case OpPhi, OpArg, OpSelect0, OpSelect1:
+ // Phis need to stay in their block.
+ // Arg must stay in the entry block.
+ // Tuple selectors must stay with the tuple generator.
+ continue
+ }
+ if v.MemoryArg() != nil {
+ // We can't move values which have a memory arg - it might
+ // make two memory values live across a block boundary.
+ continue
+ }
+ // Count arguments which will need a register.
+ narg := 0
+ for _, a := range v.Args {
+ if !a.rematerializeable() {
+ narg++
+ }
+ }
+ if narg >= 2 && !v.Type.IsFlags() {
+ // Don't move values with more than one input, as that may
+ // increase register pressure.
+ // We make an exception for flags, as we want flag generators
+ // moved next to uses (because we only have 1 flag register).
+ continue
+ }
+ canMove[v.ID] = true
+ }
+ }
+
+ // Build data structure for fast least-common-ancestor queries.
+ lca := makeLCArange(f)
+
+ // For each moveable value, record the block that dominates all uses found so far.
+ target := make([]*Block, f.NumValues())
+
+ // Grab loop information.
+ // We use this to make sure we don't tighten a value into a (deeper) loop.
+ idom := f.Idom()
+ loops := f.loopnest()
+ loops.calculateDepths()
+
+ changed := true
+ for changed {
+ changed = false
+
+ // Reset target
+ for i := range target {
+ target[i] = nil
+ }
+
+ // Compute target locations (for moveable values only).
+ // target location = the least common ancestor of all uses in the dominator tree.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !canMove[a.ID] {
+ continue
+ }
+ use := b
+ if v.Op == OpPhi {
+ use = b.Preds[i].b
+ }
+ if target[a.ID] == nil {
+ target[a.ID] = use
+ } else {
+ target[a.ID] = lca.find(target[a.ID], use)
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !canMove[c.ID] {
+ continue
+ }
+ if target[c.ID] == nil {
+ target[c.ID] = b
+ } else {
+ target[c.ID] = lca.find(target[c.ID], b)
+ }
+ }
+ }
+
+ // If the target location is inside a loop,
+ // move the target location up to just before the loop head.
+ for _, b := range f.Blocks {
+ origloop := loops.b2l[b.ID]
+ for _, v := range b.Values {
+ t := target[v.ID]
+ if t == nil {
+ continue
+ }
+ targetloop := loops.b2l[t.ID]
+ for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
+ t = idom[targetloop.header.ID]
+ target[v.ID] = t
+ targetloop = loops.b2l[t.ID]
+ }
+ }
+ }
+
+ // Move values to target locations.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ t := target[v.ID]
+ if t == nil || t == b {
+ // v is not moveable, or is already in correct place.
+ continue
+ }
+ // Move v to the block which dominates its uses.
+ t.Values = append(t.Values, v)
+ v.Block = t
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+ changed = true
+ i--
+ }
+ }
+ }
+}
+
+// phiTighten moves constants closer to phi users.
+// This pass avoids having lots of constants live for lots of the program.
+// See issue 16407.
+func phiTighten(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ for i, a := range v.Args {
+ if !a.rematerializeable() {
+ continue // not a constant we can move around
+ }
+ if a.Block == b.Preds[i].b {
+ continue // already in the right place
+ }
+ // Make a copy of a, put in predecessor block.
+ v.SetArg(i, a.copyInto(b.Preds[i].b))
+ }
+ }
+ }
+}
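
The heart of tighten is the target computation: for each moveable value, the target block is the least common ancestor, in the dominator tree, of every block that uses it (with the loop-depth adjustment applied afterwards). The following standalone sketch folds a use list through an LCA over a toy dominator tree; the block names and the idom table are invented for illustration and do not correspond to the real lca/Idom machinery.

package main

import "fmt"

// toy dominator tree: child -> immediate dominator
var idom = map[string]string{
	"b1": "entry",
	"b2": "b1",
	"b3": "b1",
	"b4": "b3",
}

func depth(b string) int {
	d := 0
	for b != "entry" {
		b = idom[b]
		d++
	}
	return d
}

// lca walks both blocks up the dominator tree until they meet.
func lca(a, b string) string {
	for depth(a) > depth(b) {
		a = idom[a]
	}
	for depth(b) > depth(a) {
		b = idom[b]
	}
	for a != b {
		a, b = idom[a], idom[b]
	}
	return a
}

func main() {
	// A value defined in "entry" and used in b2 and b4 can sink to lca(b2, b4) = b1.
	uses := []string{"b2", "b4"}
	target := uses[0]
	for _, u := range uses[1:] {
		target = lca(target, u)
	}
	fmt.Println("target block:", target) // b1
}
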
diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go
new file mode 100644
index 0000000..c930a20
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/trim.go
@@ -0,0 +1,172 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// trim removes blocks with no code in them.
+// These blocks were inserted to remove critical edges.
+func trim(f *Func) {
+ n := 0
+ for _, b := range f.Blocks {
+ if !trimmableBlock(b) {
+ f.Blocks[n] = b
+ n++
+ continue
+ }
+
+ bPos := b.Pos
+ bIsStmt := bPos.IsStmt() == src.PosIsStmt
+
+ // Splice b out of the graph. NOTE: `mergePhi` depends on the
+ // order in which the predecessor edges are merged here.
+ p, i := b.Preds[0].b, b.Preds[0].i
+ s, j := b.Succs[0].b, b.Succs[0].i
+ ns := len(s.Preds)
+ p.Succs[i] = Edge{s, j}
+ s.Preds[j] = Edge{p, i}
+
+ for _, e := range b.Preds[1:] {
+ p, i := e.b, e.i
+ p.Succs[i] = Edge{s, len(s.Preds)}
+ s.Preds = append(s.Preds, Edge{p, i})
+ }
+
+ // Attempt to preserve a statement boundary
+ if bIsStmt {
+ sawStmt := false
+ for _, v := range s.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if v.Pos.SameFileAndLine(bPos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ sawStmt = true
+ break
+ }
+ if !sawStmt && s.Pos.SameFileAndLine(bPos) {
+ s.Pos = s.Pos.WithIsStmt()
+ }
+ }
+ // If `s` had more than one predecessor, update its phi-ops to
+ // account for the merge.
+ if ns > 1 {
+ for _, v := range s.Values {
+ if v.Op == OpPhi {
+ mergePhi(v, j, b)
+ }
+
+ }
+ // Remove the phi-ops from `b` if they were merged into the
+ // phi-ops of `s`.
+ k := 0
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Uses == 0 {
+ v.resetArgs()
+ continue
+ }
+ // Pad the arguments of the remaining phi-ops so
+ // they match the new predecessor count of `s`.
+ // Since s did not have a Phi op corresponding to
+ // the phi op in b, the other edges coming into s
+ // must be loopback edges from s, so v is the right
+ // argument to v!
+ args := make([]*Value, len(v.Args))
+ copy(args, v.Args)
+ v.resetArgs()
+ for x := 0; x < j; x++ {
+ v.AddArg(v)
+ }
+ v.AddArg(args[0])
+ for x := j + 1; x < ns; x++ {
+ v.AddArg(v)
+ }
+ for _, a := range args[1:] {
+ v.AddArg(a)
+ }
+ }
+ b.Values[k] = v
+ k++
+ }
+ b.Values = b.Values[:k]
+ }
+
+ // Merge the blocks' values.
+ for _, v := range b.Values {
+ v.Block = s
+ }
+ k := len(b.Values)
+ m := len(s.Values)
+ for i := 0; i < k; i++ {
+ s.Values = append(s.Values, nil)
+ }
+ copy(s.Values[k:], s.Values[:m])
+ copy(s.Values, b.Values)
+ }
+ if n < len(f.Blocks) {
+ f.invalidateCFG()
+ tail := f.Blocks[n:]
+ for i := range tail {
+ tail[i] = nil
+ }
+ f.Blocks = f.Blocks[:n]
+ }
+}
+
+// emptyBlock reports whether the block contains no actual instructions
+// (phi ops do not count).
+func emptyBlock(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ return false
+ }
+ }
+ return true
+}
+
+// trimmableBlock reports whether the block can be trimmed from the CFG,
+// subject to the following criteria:
+// - it should not be the first block
+// - it should be BlockPlain
+// - it should not loop back to itself
+// - it either is the single predecessor of the successor block or
+// contains no actual instructions
+func trimmableBlock(b *Block) bool {
+ if b.Kind != BlockPlain || b == b.Func.Entry {
+ return false
+ }
+ s := b.Succs[0].b
+ return s != b && (len(s.Preds) == 1 || emptyBlock(b))
+}
+
+// mergePhi adjusts the number of `v`s arguments to account for merge
+// of `b`, which was `i`th predecessor of the `v`s block.
+func mergePhi(v *Value, i int, b *Block) {
+ u := v.Args[i]
+ if u.Block == b {
+ if u.Op != OpPhi {
+ b.Func.Fatalf("value %s is not a phi operation", u.LongString())
+ }
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., u, ..., vk)
+ // then the merged phi is
+ // v = φ(v0, v1, ..., u0, ..., vk, u1, ..., un)
+ v.SetArg(i, u.Args[0])
+ v.AddArgs(u.Args[1:]...)
+ } else {
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., vi, ..., vk)
+ // i.e. it does not use a value from the predecessor block,
+ // then the merged phi is
+ // v = φ(v0, v1, ..., vk, vi, vi, ...)
+ for j := 1; j < len(b.Preds); j++ {
+ v.AddArg(v.Args[i])
+ }
+ }
+}
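
The argument splicing that mergePhi documents in its first case can be followed with plain slices: the i'th argument u (itself a phi from the trimmed block) is replaced by u's first argument, and u's remaining arguments are appended for the predecessor edges folded into s. A standalone sketch, using strings in place of *Value and assuming exactly that case:

package main

import "fmt"

// mergePhiArgs mirrors the comment above: given v = φ(v0, ..., u, ..., vk)
// with u = φ(u0, ..., un) at index i, return φ(v0, ..., u0, ..., vk, u1, ..., un).
func mergePhiArgs(vArgs []string, i int, uArgs []string) []string {
	out := append([]string{}, vArgs...)
	out[i] = uArgs[0]               // u's first argument replaces u itself
	out = append(out, uArgs[1:]...) // the rest are appended for the new edges
	return out
}

func main() {
	vArgs := []string{"v0", "u", "v2"} // u came from the trimmed block, edge i = 1
	uArgs := []string{"u0", "u1", "u2"}
	fmt.Println(mergePhiArgs(vArgs, 1, uArgs)) // [v0 u0 v2 u1 u2]
}
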
diff --git a/src/cmd/compile/internal/ssa/tuple.go b/src/cmd/compile/internal/ssa/tuple.go
new file mode 100644
index 0000000..38deabf
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tuple.go
@@ -0,0 +1,59 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// tightenTupleSelectors ensures that tuple selectors (Select0 and
+// Select1 ops) are in the same block as their tuple generator. The
+// function also ensures that there are no duplicate tuple selectors.
+// These properties are expected by the scheduler but may not have
+// been maintained by the optimization pipeline up to this point.
+//
+// See issues 16741 and 39472.
+func tightenTupleSelectors(f *Func) {
+ selectors := make(map[struct {
+ id ID
+ op Op
+ }]*Value)
+ for _, b := range f.Blocks {
+ for _, selector := range b.Values {
+ if selector.Op != OpSelect0 && selector.Op != OpSelect1 {
+ continue
+ }
+
+ // Get the tuple generator to use as a key for de-duplication.
+ tuple := selector.Args[0]
+ if !tuple.Type.IsTuple() {
+ f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString())
+ }
+
+ // If there is a pre-existing selector in the target block then
+ // use that. Do this even if the selector is already in the
+ // target block to avoid duplicate tuple selectors.
+ key := struct {
+ id ID
+ op Op
+ }{tuple.ID, selector.Op}
+ if t := selectors[key]; t != nil {
+ if selector != t {
+ selector.copyOf(t)
+ }
+ continue
+ }
+
+ // If the selector is in the wrong block copy it into the target
+ // block.
+ if selector.Block != tuple.Block {
+ t := selector.copyInto(tuple.Block)
+ selector.copyOf(t)
+ selectors[key] = t
+ continue
+ }
+
+ // The selector is in the target block. Add it to the map so it
+ // cannot be duplicated.
+ selectors[key] = selector
+ }
+ }
+}
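
tightenTupleSelectors keys its de-duplication on the pair (tuple ID, selector op), using a struct-valued map key so each tuple ends up with at most one Select0 and one Select1. A standalone sketch of that pattern with invented IDs (the real pass then redirects duplicates via copyOf):

package main

import "fmt"

type selKey struct {
	tupleID int
	op      string // "Select0" or "Select1"
}

func main() {
	seen := map[selKey]int{} // key -> canonical selector id
	selectors := []struct {
		id, tupleID int
		op          string
	}{
		{id: 10, tupleID: 1, op: "Select0"},
		{id: 11, tupleID: 1, op: "Select1"},
		{id: 12, tupleID: 1, op: "Select0"}, // duplicate of v10
	}
	for _, s := range selectors {
		k := selKey{s.tupleID, s.op}
		if canon, ok := seen[k]; ok {
			fmt.Printf("v%d is a duplicate of v%d\n", s.id, canon)
			continue
		}
		seen[k] = s.id
	}
}
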
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
new file mode 100644
index 0000000..edc43aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -0,0 +1,494 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+)
+
+// A Value represents a value in the SSA representation of the program.
+// The ID and Type fields must not be modified. The remainder may be modified
+// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
+type Value struct {
+ // A unique identifier for the value. For performance we allocate these IDs
+ // densely starting at 1. There is no guarantee that there won't be occasional holes, though.
+ ID ID
+
+ // The operation that computes this value. See op.go.
+ Op Op
+
+ // The type of this value. Normally this will be a Go type, but there
+ // are a few other pseudo-types, see ../types/type.go.
+ Type *types.Type
+
+ // Auxiliary info for this value. The type of this information depends on the opcode and type.
+ // AuxInt is used for integer values, Aux is used for other values.
+ // Floats are stored in AuxInt using math.Float64bits(f).
+ // Unused portions of AuxInt are filled by sign-extending the used portion,
+ // even if the represented value is unsigned.
+ // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
+ // Use Value.AuxUnsigned to get the zero-extended value of AuxInt.
+ AuxInt int64
+ Aux interface{}
+
+ // Arguments of this value
+ Args []*Value
+
+ // Containing basic block
+ Block *Block
+
+ // Source position
+ Pos src.XPos
+
+ // Use count. Each appearance in Value.Args and Block.Controls counts once.
+ Uses int32
+
+ // wasm: Value stays on the WebAssembly stack. This value will not get a "register" (WebAssembly variable)
+ // nor a slot on Go stack, and the generation of this value is delayed to its use time.
+ OnWasmStack bool
+
+ // Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it.
+ InCache bool
+
+ // Storage for the first three args
+ argstorage [3]*Value
+}
+
+// Examples:
+// Opcode          aux    args
+//  OpAdd          nil       2
+//  OpConst     string       0    string constant
+//  OpConst      int64       0    int64 constant
+//  OpAddcq      int64       1    amd64 op: v = arg[0] + constant
+
+// short form print. Just v#.
+func (v *Value) String() string {
+ if v == nil {
+ return "nil" // should never happen, but not panicking helps with debugging
+ }
+ return fmt.Sprintf("v%d", v.ID)
+}
+
+func (v *Value) AuxInt8() int8 {
+ if opcodeTable[v.Op].auxType != auxInt8 {
+ v.Fatalf("op %s doesn't have an int8 aux field", v.Op)
+ }
+ return int8(v.AuxInt)
+}
+
+func (v *Value) AuxInt16() int16 {
+ if opcodeTable[v.Op].auxType != auxInt16 {
+ v.Fatalf("op %s doesn't have an int16 aux field", v.Op)
+ }
+ return int16(v.AuxInt)
+}
+
+func (v *Value) AuxInt32() int32 {
+ if opcodeTable[v.Op].auxType != auxInt32 {
+ v.Fatalf("op %s doesn't have an int32 aux field", v.Op)
+ }
+ return int32(v.AuxInt)
+}
+
+// AuxUnsigned returns v.AuxInt as an unsigned value for OpConst*.
+// v.AuxInt is always sign-extended to 64 bits, even if the
+// represented value is unsigned. This undoes that sign extension.
+func (v *Value) AuxUnsigned() uint64 {
+ c := v.AuxInt
+ switch v.Op {
+ case OpConst64:
+ return uint64(c)
+ case OpConst32:
+ return uint64(uint32(c))
+ case OpConst16:
+ return uint64(uint16(c))
+ case OpConst8:
+ return uint64(uint8(c))
+ }
+ v.Fatalf("op %s isn't OpConst*", v.Op)
+ return 0
+}
+
+func (v *Value) AuxFloat() float64 {
+ if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 {
+ v.Fatalf("op %s doesn't have a float aux field", v.Op)
+ }
+ return math.Float64frombits(uint64(v.AuxInt))
+}
+func (v *Value) AuxValAndOff() ValAndOff {
+ if opcodeTable[v.Op].auxType != auxSymValAndOff {
+ v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
+ }
+ return ValAndOff(v.AuxInt)
+}
+
+func (v *Value) AuxArm64BitField() arm64BitField {
+ if opcodeTable[v.Op].auxType != auxARM64BitField {
+ v.Fatalf("op %s doesn't have an arm64 bitfield aux field", v.Op)
+ }
+ return arm64BitField(v.AuxInt)
+}
+
+// long form print. v# = opcode <type> [aux] args [: reg] (names)
+func (v *Value) LongString() string {
+ s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
+ s += " <" + v.Type.String() + ">"
+ s += v.auxString()
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %v", a)
+ }
+ var r []Location
+ if v.Block != nil {
+ r = v.Block.Func.RegAlloc
+ }
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + r[v.ID].String()
+ }
+ var names []string
+ if v.Block != nil {
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ }
+ if len(names) != 0 {
+ sort.Strings(names) // Otherwise a source of variation in debugging output.
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+ return s
+}
+
+func (v *Value) auxString() string {
+ switch opcodeTable[v.Op].auxType {
+ case auxBool:
+ if v.AuxInt == 0 {
+ return " [false]"
+ } else {
+ return " [true]"
+ }
+ case auxInt8:
+ return fmt.Sprintf(" [%d]", v.AuxInt8())
+ case auxInt16:
+ return fmt.Sprintf(" [%d]", v.AuxInt16())
+ case auxInt32:
+ return fmt.Sprintf(" [%d]", v.AuxInt32())
+ case auxInt64, auxInt128:
+ return fmt.Sprintf(" [%d]", v.AuxInt)
+ case auxARM64BitField:
+ lsb := v.AuxArm64BitField().getARM64BFlsb()
+ width := v.AuxArm64BitField().getARM64BFwidth()
+ return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
+ case auxFloat32, auxFloat64:
+ return fmt.Sprintf(" [%g]", v.AuxFloat())
+ case auxString:
+ return fmt.Sprintf(" {%q}", v.Aux)
+ case auxSym, auxCall, auxTyp:
+ if v.Aux != nil {
+ return fmt.Sprintf(" {%v}", v.Aux)
+ }
+ case auxSymOff, auxCallOff, auxTypSize:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ if v.AuxInt != 0 {
+ s += fmt.Sprintf(" [%v]", v.AuxInt)
+ }
+ return s
+ case auxSymValAndOff:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ return s + fmt.Sprintf(" [%s]", v.AuxValAndOff())
+ case auxCCop:
+ return fmt.Sprintf(" {%s}", Op(v.AuxInt))
+ case auxS390XCCMask, auxS390XRotateParams:
+ return fmt.Sprintf(" {%v}", v.Aux)
+ case auxFlagConstant:
+ return fmt.Sprintf("[%s]", flagConstant(v.AuxInt))
+ }
+ return ""
+}
+
+// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower.
+// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar).
+//go:noinline
+func (v *Value) AddArg(w *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w)
+ w.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg2(w1, w2 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2)
+ w1.Uses++
+ w2.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg3(w1, w2, w3 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2, w3)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg4(w1, w2, w3, w4 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5, w6)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+ w6.Uses++
+}
+
+func (v *Value) AddArgs(a ...*Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, a...)
+ for _, x := range a {
+ x.Uses++
+ }
+}
+func (v *Value) SetArg(i int, w *Value) {
+ v.Args[i].Uses--
+ v.Args[i] = w
+ w.Uses++
+}
+func (v *Value) RemoveArg(i int) {
+ v.Args[i].Uses--
+ copy(v.Args[i:], v.Args[i+1:])
+ v.Args[len(v.Args)-1] = nil // aid GC
+ v.Args = v.Args[:len(v.Args)-1]
+}
+func (v *Value) SetArgs1(a *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+}
+func (v *Value) SetArgs2(a, b *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+}
+func (v *Value) SetArgs3(a, b, c *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+ v.AddArg(c)
+}
+
+func (v *Value) resetArgs() {
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.argstorage[2] = nil
+ v.Args = v.argstorage[:0]
+}
+
+// reset is called from most rewrite rules.
+// Allowing it to be inlined increases the size
+// of cmd/compile by almost 10%, and slows it down.
+//go:noinline
+func (v *Value) reset(op Op) {
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = op
+ v.resetArgs()
+ v.AuxInt = 0
+ v.Aux = nil
+}
+
+// copyOf is called from rewrite rules.
+// It modifies v to be (Copy a).
+//go:noinline
+func (v *Value) copyOf(a *Value) {
+ if v == a {
+ return
+ }
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = OpCopy
+ v.resetArgs()
+ v.AddArg(a)
+ v.AuxInt = 0
+ v.Aux = nil
+ v.Type = a.Type
+}
+
+// copyInto makes a new value identical to v and adds it to the end of b.
+// Unlike copyIntoWithXPos, this does not check for v.Pos being a statement.
+func (v *Value) copyInto(b *Block) *Value {
+ c := b.NewValue0(v.Pos.WithNotStmt(), v.Op, v.Type) // Lose the position, this causes line number churn otherwise.
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+// copyIntoWithXPos makes a new value identical to v and adds it to the end of b.
+// The supplied position is used as the position of the new value.
+// Because this is used for rematerialization, check for the case where the
+// (rematerialized) value being copied carried a statement mark, the supplied
+// position (of the instruction using the rematerialized value) is not marked,
+// and the two share a file and line; in that case, preserve the statement mark.
+func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value {
+ if v.Pos.IsStmt() == src.PosIsStmt && pos.IsStmt() != src.PosIsStmt && v.Pos.SameFileAndLine(pos) {
+ pos = pos.WithIsStmt()
+ }
+ c := b.NewValue0(pos, v.Op, v.Type)
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
+func (v *Value) Log() bool { return v.Block.Log() }
+func (v *Value) Fatalf(msg string, args ...interface{}) {
+ v.Block.Func.fe.Fatalf(v.Pos, msg, args...)
+}
+
+// isGenericIntConst reports whether v is a generic integer constant.
+func (v *Value) isGenericIntConst() bool {
+ return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8)
+}
+
+// Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg0 returns the register assigned to the first output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg0() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[0]
+ if reg == nil {
+ v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg1 returns the register assigned to the second output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg1() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[1]
+ if reg == nil {
+ v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+func (v *Value) RegName() string {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).name
+}
+
+// MemoryArg returns the memory argument for the Value.
+// The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part).
+// Otherwise, nil is returned.
+func (v *Value) MemoryArg() *Value {
+ if v.Op == OpPhi {
+ v.Fatalf("MemoryArg on Phi")
+ }
+ na := len(v.Args)
+ if na == 0 {
+ return nil
+ }
+ if m := v.Args[na-1]; m.Type.IsMemory() {
+ return m
+ }
+ return nil
+}
+
+// LackingPos indicates whether v is a value that is unlikely to have a correct
+// position assigned to it. Ignoring such values leads to more user-friendly positions
+// assigned to nearby values and the blocks containing them.
+func (v *Value) LackingPos() bool {
+ // The exact definition of LackingPos is somewhat heuristically defined and may change
+ // in the future, for example if some of these operations are generated more carefully
+ // with respect to their source position.
+ return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi ||
+ (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem
+}
+
+// removeable reports whether the value v can be removed from the SSA graph entirely
+// if its use count drops to 0.
+func (v *Value) removeable() bool {
+ if v.Type.IsVoid() {
+ // Void ops, like nil pointer checks, must stay.
+ return false
+ }
+ if v.Type.IsMemory() {
+ // Not all memory ops need to stay, but we do need
+ // to keep calls at least (because they might have
+ // synchronization operations we can't see).
+ return false
+ }
+ if v.Op.HasSideEffects() {
+ // These are mostly synchronization operations.
+ return false
+ }
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
new file mode 100644
index 0000000..849c9e8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -0,0 +1,616 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// A ZeroRegion records parts of an object which are known to be zero.
+// A ZeroRegion only applies to a single memory state.
+// Each bit in mask is set if the corresponding pointer-sized word of
+// the base object is known to be zero.
+// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
+// is known to be zero.
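+// For example, with ptrSize = 8, a ZeroRegion{base: p, mask: 0b111} records
+// that words 0, 1, and 2 of the object at p (bytes [p+0, p+24)) are known to
+// be zero (an illustrative reading of the mask, not an exhaustive one).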
+type ZeroRegion struct {
+ base *Value
+ mask uint64
+}
+
+// needwb reports whether we need write barrier for store op v.
+// v must be Store/Move/Zero.
+// zeroes provides known zero information (keyed by ID of memory-type values).
+func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
+ t, ok := v.Aux.(*types.Type)
+ if !ok {
+ v.Fatalf("store aux is not a type: %s", v.LongString())
+ }
+ if !t.HasPointers() {
+ return false
+ }
+ if IsStackAddr(v.Args[0]) {
+ return false // write on stack doesn't need write barrier
+ }
+ if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) {
+ // Copying data from readonly memory into a fresh object doesn't need a write barrier.
+ return false
+ }
+ if v.Op == OpStore && IsGlobalAddr(v.Args[1]) {
+ // Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier.
+ ptr := v.Args[0]
+ var off int64
+ size := v.Aux.(*types.Type).Size()
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ ptrSize := v.Block.Func.Config.PtrSize
+ if off%ptrSize != 0 || size%ptrSize != 0 {
+ v.Fatalf("unaligned pointer write")
+ }
+ if off < 0 || off+size > 64*ptrSize {
+ // write goes off end of tracked offsets
+ return true
+ }
+ z := zeroes[v.MemoryArg().ID]
+ if ptr != z.base {
+ return true
+ }
+ for i := off; i < off+size; i += ptrSize {
+ if z.mask>>uint(i/ptrSize)&1 == 0 {
+ return true // not known to be zero
+ }
+ }
+ // All written locations are known to be zero - write barrier not needed.
+ return false
+ }
+ return true
+}
+
+// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
+// when necessary (the condition above). It rewrites store ops to branches
+// and runtime calls, like
+//
+// if writeBarrier.enabled {
+// gcWriteBarrier(ptr, val) // Not a regular Go call
+// } else {
+// *ptr = val
+// }
+//
+// A sequence of WB stores for many pointer fields of a single type will
+// be emitted together, with a single branch.
+func writebarrier(f *Func) {
+ if !f.fe.UseWriteBarrier() {
+ return
+ }
+
+ var sb, sp, wbaddr, const0 *Value
+ var typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym
+ var stores, after []*Value
+ var sset *sparseSet
+ var storeNumber []int32
+
+ zeroes := f.computeZeroMap()
+ for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
+ // first, identify all the stores that need a write barrier inserted.
+ // mark them with WB ops temporarily, and record the presence of WB ops.
+ nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStore, OpMove, OpZero:
+ if needwb(v, zeroes) {
+ switch v.Op {
+ case OpStore:
+ v.Op = OpStoreWB
+ case OpMove:
+ v.Op = OpMoveWB
+ case OpZero:
+ v.Op = OpZeroWB
+ }
+ nWBops++
+ }
+ }
+ }
+ if nWBops == 0 {
+ continue
+ }
+
+ if wbaddr == nil {
+ // lazily initialize global values for write barrier test and calls
+ // find SB and SP values in entry block
+ initpos := f.Entry.Pos
+ sp, sb = f.spSb()
+ wbsym := f.fe.Syslook("writeBarrier")
+ wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
+ gcWriteBarrier = f.fe.Syslook("gcWriteBarrier")
+ typedmemmove = f.fe.Syslook("typedmemmove")
+ typedmemclr = f.fe.Syslook("typedmemclr")
+ const0 = f.ConstInt32(f.Config.Types.UInt32, 0)
+
+ // allocate auxiliary data structures for computing store order
+ sset = f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ storeNumber = make([]int32, f.NumValues())
+ }
+
+ // order values in store order
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+
+ firstSplit := true
+ again:
+ // find the start and end of the last contiguous WB store sequence.
+ // a branch will be inserted there. values after it will be moved
+ // to a new block.
+ var last *Value
+ var start, end int
+ values := b.Values
+ FindSeq:
+ for i := len(values) - 1; i >= 0; i-- {
+ w := values[i]
+ switch w.Op {
+ case OpStoreWB, OpMoveWB, OpZeroWB:
+ start = i
+ if last == nil {
+ last = w
+ end = i + 1
+ }
+ case OpVarDef, OpVarLive, OpVarKill:
+ continue
+ default:
+ if last == nil {
+ continue
+ }
+ break FindSeq
+ }
+ }
+ stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
+ after = append(after[:0], b.Values[end:]...)
+ b.Values = b.Values[:start]
+
+ // find the memory before the WB stores
+ mem := stores[0].MemoryArg()
+ pos := stores[0].Pos
+ bThen := f.NewBlock(BlockPlain)
+ bElse := f.NewBlock(BlockPlain)
+ bEnd := f.NewBlock(b.Kind)
+ bThen.Pos = pos
+ bElse.Pos = pos
+ bEnd.Pos = b.Pos
+ b.Pos = pos
+
+ // set up control flow for end block
+ bEnd.CopyControls(b)
+ bEnd.Likely = b.Likely
+ for _, e := range b.Succs {
+ bEnd.Succs = append(bEnd.Succs, e)
+ e.b.Preds[e.i].b = bEnd
+ }
+
+ // set up control flow for write barrier test
+ // load word, test word, avoiding partial register write from load byte.
+ cfgtypes := &f.Config.Types
+ flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
+ flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
+ b.Kind = BlockIf
+ b.SetControl(flag)
+ b.Likely = BranchUnlikely
+ b.Succs = b.Succs[:0]
+ b.AddEdgeTo(bThen)
+ b.AddEdgeTo(bElse)
+ // TODO: For OpStoreWB and the buffered write barrier,
+ // we could move the write out of the write barrier,
+ // which would lead to fewer branches. We could do
+ // something similar to OpZeroWB, since the runtime
+ // could provide just the barrier half and then we
+ // could unconditionally do an OpZero (which could
+ // also generate better zeroing code). OpMoveWB is
+ // trickier and would require changing how
+ // cgoCheckMemmove works.
+ bThen.AddEdgeTo(bEnd)
+ bElse.AddEdgeTo(bEnd)
+
+ // for each write barrier store, append write barrier version to bThen
+ // and simple store version to bElse
+ memThen := mem
+ memElse := mem
+
+ // If the source of a MoveWB is volatile (will be clobbered by a
+ // function call), we need to copy it to a temporary location, as
+ // marshaling the args of typedmemmove might clobber the value we're
+ // trying to move.
+ // Look for volatile source, copy it to temporary before we emit any
+ // call.
+ // It is unlikely to have more than one of them. Just do a linear
+ // search instead of using a map.
+ type volatileCopy struct {
+ src *Value // address of original volatile value
+ tmp *Value // address of temporary we've copied the volatile value into
+ }
+ var volatiles []volatileCopy
+ copyLoop:
+ for _, w := range stores {
+ if w.Op == OpMoveWB {
+ val := w.Args[1]
+ if isVolatile(val) {
+ for _, c := range volatiles {
+ if val == c.src {
+ continue copyLoop // already copied
+ }
+ }
+
+ t := val.Type.Elem()
+ tmp := f.fe.Auto(w.Pos, t)
+ memThen = bThen.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, memThen)
+ tmpaddr := bThen.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, memThen)
+ siz := t.Size()
+ memThen = bThen.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, memThen)
+ memThen.Aux = t
+ volatiles = append(volatiles, volatileCopy{val, tmpaddr})
+ }
+ }
+ }
+
+ for _, w := range stores {
+ ptr := w.Args[0]
+ pos := w.Pos
+
+ var fn *obj.LSym
+ var typ *obj.LSym
+ var val *Value
+ switch w.Op {
+ case OpStoreWB:
+ val = w.Args[1]
+ nWBops--
+ case OpMoveWB:
+ fn = typedmemmove
+ val = w.Args[1]
+ typ = w.Aux.(*types.Type).Symbol()
+ nWBops--
+ case OpZeroWB:
+ fn = typedmemclr
+ typ = w.Aux.(*types.Type).Symbol()
+ nWBops--
+ case OpVarDef, OpVarLive, OpVarKill:
+ }
+
+ // then block: emit write barrier call
+ switch w.Op {
+ case OpStoreWB, OpMoveWB, OpZeroWB:
+ if w.Op == OpStoreWB {
+ memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen)
+ } else {
+ srcval := val
+ if w.Op == OpMoveWB && isVolatile(srcval) {
+ for _, c := range volatiles {
+ if srcval == c.src {
+ srcval = c.tmp
+ break
+ }
+ }
+ }
+ memThen = wbcall(pos, bThen, fn, typ, ptr, srcval, memThen, sp, sb)
+ }
+ // Note that we set up a writebarrier function call.
+ f.fe.SetWBPos(pos)
+ case OpVarDef, OpVarLive, OpVarKill:
+ memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
+ }
+
+ // else block: normal store
+ switch w.Op {
+ case OpStoreWB:
+ memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
+ case OpMoveWB:
+ memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
+ memElse.Aux = w.Aux
+ case OpZeroWB:
+ memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
+ memElse.Aux = w.Aux
+ case OpVarDef, OpVarLive, OpVarKill:
+ memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
+ }
+ }
+
+ // mark volatile temps dead
+ for _, c := range volatiles {
+ tmpNode := c.tmp.Aux
+ memThen = bThen.NewValue1A(memThen.Pos, OpVarKill, types.TypeMem, tmpNode, memThen)
+ }
+
+ // merge memory
+ // Splice memory Phi into the last memory of the original sequence,
+ // which may be used in subsequent blocks. Other memories in the
+ // sequence must be dead after this block since there can be only
+ // one memory live.
+ bEnd.Values = append(bEnd.Values, last)
+ last.Block = bEnd
+ last.reset(OpPhi)
+ last.Pos = last.Pos.WithNotStmt()
+ last.Type = types.TypeMem
+ last.AddArg(memThen)
+ last.AddArg(memElse)
+ for _, w := range stores {
+ if w != last {
+ w.resetArgs()
+ }
+ }
+ for _, w := range stores {
+ if w != last {
+ f.freeValue(w)
+ }
+ }
+
+ // put values after the store sequence into the end block
+ bEnd.Values = append(bEnd.Values, after...)
+ for _, w := range after {
+ w.Block = bEnd
+ }
+
+ // Preemption is unsafe between loading the write
+ // barrier-enabled flag and performing the write
+ // because that would allow a GC phase transition,
+ // which would invalidate the flag. Remember the
+ // conditional block so liveness analysis can disable
+ // safe-points. This is somewhat subtle because we're
+ // splitting b bottom-up.
+ if firstSplit {
+ // Add b itself.
+ b.Func.WBLoads = append(b.Func.WBLoads, b)
+ firstSplit = false
+ } else {
+ // We've already split b, so we just pushed a
+ // write barrier test into bEnd.
+ b.Func.WBLoads = append(b.Func.WBLoads, bEnd)
+ }
+
+ // if we have more stores in this block, do this block again
+ if nWBops > 0 {
+ goto again
+ }
+ }
+}
+
+// computeZeroMap returns a map from an ID of a memory value to
+// a set of locations that are known to be zeroed at that memory value.
+func (f *Func) computeZeroMap() map[ID]ZeroRegion {
+ ptrSize := f.Config.PtrSize
+ // Keep track of which parts of memory are known to be zero.
+ // This helps with removing write barriers for various initialization patterns.
+ // This analysis is conservative. We only keep track, for each memory state, of
+ // which of the first 64 words of a single object are known to be zero.
+ zeroes := map[ID]ZeroRegion{}
+ // Find new objects.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpLoad {
+ continue
+ }
+ mem := v.MemoryArg()
+ if IsNewObject(v, mem) {
+ nptr := v.Type.Elem().Size() / ptrSize
+ if nptr > 64 {
+ nptr = 64
+ }
+ zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
+ }
+ }
+ }
+ // Find stores to those new objects.
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ // Note: iterating forwards helps convergence, as values are
+ // typically (but not always!) in store order.
+ for _, v := range b.Values {
+ if v.Op != OpStore {
+ continue
+ }
+ z, ok := zeroes[v.MemoryArg().ID]
+ if !ok {
+ continue
+ }
+ ptr := v.Args[0]
+ var off int64
+ size := v.Aux.(*types.Type).Size()
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ if ptr != z.base {
+ // Different base object - we don't know anything.
+ // We could even be writing to the base object we know
+ // about, but through an aliased but offset pointer.
+ // So we have to throw all the zero information we have away.
+ continue
+ }
+ // Round to cover any partially written pointer slots.
+ // Pointer writes should never be unaligned like this, but non-pointer
+ // writes to pointer-containing types will do this.
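+ // For example, with ptrSize = 8, a 4-byte store at offset 10 is widened
+ // to off = 8, size = 8, so it clears exactly the bit for word 1
+ // (an illustrative case; the code below handles the general one).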
+ if d := off % ptrSize; d != 0 {
+ off -= d
+ size += d
+ }
+ if d := size % ptrSize; d != 0 {
+ size += ptrSize - d
+ }
+ // Clip to the 64 words that we track.
+ min := off
+ max := off + size
+ if min < 0 {
+ min = 0
+ }
+ if max > 64*ptrSize {
+ max = 64 * ptrSize
+ }
+ // Clear bits for parts that we are writing (and hence
+ // will no longer necessarily be zero).
+ for i := min; i < max; i += ptrSize {
+ bit := i / ptrSize
+ z.mask &^= 1 << uint(bit)
+ }
+ if z.mask == 0 {
+ // No more known zeros - don't bother keeping.
+ continue
+ }
+ // Save updated known zero contents for new store.
+ if zeroes[v.ID] != z {
+ zeroes[v.ID] = z
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > 0 {
+ fmt.Printf("func %s\n", f.Name)
+ for mem, z := range zeroes {
+ fmt.Printf(" memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
+ }
+ }
+ return zeroes
+}
+
+// wbcall emits a write barrier runtime call in b and returns the resulting memory.
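+// For a typedmemmove call, the arguments are laid out after the fixed frame
+// as: the type word, then the destination pointer, then the source pointer,
+// each rounded up to its own alignment (an illustrative summary; exact
+// offsets depend on FixedFrameSize and the target's pointer size).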
+func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value) *Value {
+ config := b.Func.Config
+
+ // put arguments on stack
+ off := config.ctxt.FixedFrameSize()
+
+ var ACArgs []Param
+ if typ != nil { // for typedmemmove
+ taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
+ off = round(off, taddr.Type.Alignment())
+ arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
+ ACArgs = append(ACArgs, Param{Type: b.Func.Config.Types.Uintptr, Offset: int32(off)})
+ off += taddr.Type.Size()
+ }
+
+ off = round(off, ptr.Type.Alignment())
+ arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
+ ACArgs = append(ACArgs, Param{Type: ptr.Type, Offset: int32(off)})
+ off += ptr.Type.Size()
+
+ if val != nil {
+ off = round(off, val.Type.Alignment())
+ arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
+ ACArgs = append(ACArgs, Param{Type: val.Type, Offset: int32(off)})
+ off += val.Type.Size()
+ }
+ off = round(off, config.PtrSize)
+
+ // issue call
+ mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn, ACArgs, nil), mem)
+ mem.AuxInt = off - config.ctxt.FixedFrameSize()
+ return mem
+}
+
+// round rounds o up to a multiple of r; r must be a power of 2.
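+// For example, round(9, 8) = (9+7) &^ 7 = 16, and round(16, 8) = 16.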
+func round(o int64, r int64) int64 {
+ return (o + r - 1) &^ (r - 1)
+}
+
+// IsStackAddr reports whether v is known to be an address of a stack slot.
+func IsStackAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr:
+ return true
+ }
+ return false
+}
+
+// IsGlobalAddr reports whether v is known to be an address of a global (or nil).
+func IsGlobalAddr(v *Value) bool {
+ if v.Op == OpAddr && v.Args[0].Op == OpSB {
+ return true // address of a global
+ }
+ if v.Op == OpConstNil {
+ return true
+ }
+ if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
+ return true // loading from a read-only global - the resulting address can't be a heap address.
+ }
+ return false
+}
+
+// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
+func IsReadOnlyGlobalAddr(v *Value) bool {
+ if v.Op == OpConstNil {
+ // Nil pointers are read only. See issue 33438.
+ return true
+ }
+ if v.Op == OpAddr && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
+ return true
+ }
+ return false
+}
+
+// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem.
+func IsNewObject(v *Value, mem *Value) bool {
+ if v.Op != OpLoad {
+ return false
+ }
+ if v.MemoryArg() != mem {
+ return false
+ }
+ if mem.Op != OpStaticCall {
+ return false
+ }
+ if !isSameCall(mem.Aux, "runtime.newobject") {
+ return false
+ }
+ if v.Args[0].Op != OpOffPtr {
+ return false
+ }
+ if v.Args[0].Args[0].Op != OpSP {
+ return false
+ }
+ c := v.Block.Func.Config
+ if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value
+ return false
+ }
+ return true
+}
+
+// IsSanitizerSafeAddr reports whether v is known to be an address
+// that doesn't need instrumentation.
+func IsSanitizerSafeAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr:
+ // Stack addresses are always safe.
+ return true
+ case OpITab, OpStringPtr, OpGetClosurePtr:
+ // Itabs, string data, and closure fields are
+ // read-only once initialized.
+ return true
+ case OpAddr:
+ return v.Aux.(*obj.LSym).Type == objabi.SRODATA
+ }
+ return false
+}
+
+// isVolatile reports whether v is a pointer to the argument region on the stack,
+// which will be clobbered by a function call.
+func isVolatile(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier_test.go b/src/cmd/compile/internal/ssa/writebarrier_test.go
new file mode 100644
index 0000000..0b11afc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier_test.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestWriteBarrierStoreOrder(t *testing.T) {
+ // Make sure the writebarrier phase works even when StoreWB ops are not in dependency order
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb2", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "wb1"),
+ Valu("wb1", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order
+ Goto("exit")),
+ Bloc("exit",
+ Exit("wb2")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestWriteBarrierPhi(t *testing.T) {
+ // Make sure the writebarrier phase works for a single-block loop, where
+ // a Phi op takes the store in the same block as an argument.
+ // See issue #19067.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "wb"),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb", OpStore, types.TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
diff --git a/src/cmd/compile/internal/ssa/xposmap.go b/src/cmd/compile/internal/ssa/xposmap.go
new file mode 100644
index 0000000..93582e1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/xposmap.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+type lineRange struct {
+ first, last uint32
+}
+
+// An xposmap is a map from the file index and line of a src.XPos to an int32,
+// implemented sparsely to save space (column and statement status are ignored).
+// The sparse skeleton is constructed once, and then reused by ssa phases
+// that (re)move values with statements attached.
+type xposmap struct {
+ // A map from file index to maps from line range to integers (block numbers)
+ maps map[int32]*biasedSparseMap
+ // The next two fields provide a single-item cache for common case of repeated lines from same file.
+ lastIndex int32 // -1 means no entry in cache
+ lastMap *biasedSparseMap // map found at maps[lastIndex]
+}
+
+// newXposmap constructs an xposmap valid for inputs which have a file index in the keys of x,
+// and line numbers in the range x[file index].
+// The resulting xposmap will panic if a caller attempts to set or add an XPos not in that range.
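+//
+// A minimal usage sketch (the file index 0 and line range 1-100 are
+// hypothetical, as is pos, an XPos within that range):
+//
+//	m := newXposmap(map[int]lineRange{0: {first: 1, last: 100}})
+//	m.set(pos, 7)   // associate 7 with pos's file index and line
+//	v := m.get(pos) // v == 7
+//	m.clear()       // keep the sparse skeleton, drop the data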
+func newXposmap(x map[int]lineRange) *xposmap {
+ maps := make(map[int32]*biasedSparseMap)
+ for i, p := range x {
+ maps[int32(i)] = newBiasedSparseMap(int(p.first), int(p.last))
+ }
+ return &xposmap{maps: maps, lastIndex: -1} // zero for the rest is okay
+}
+
+// clear removes data from the map but leaves the sparse skeleton.
+func (m *xposmap) clear() {
+ for _, l := range m.maps {
+ if l != nil {
+ l.clear()
+ }
+ }
+ m.lastIndex = -1
+ m.lastMap = nil
+}
+
+// mapFor returns the line range map for a given file index.
+func (m *xposmap) mapFor(index int32) *biasedSparseMap {
+ if index == m.lastIndex {
+ return m.lastMap
+ }
+ mf := m.maps[index]
+ m.lastIndex = index
+ m.lastMap = mf
+ return mf
+}
+
+// set inserts p->v into the map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+func (m *xposmap) set(p src.XPos, v int32) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ panic(fmt.Sprintf("xposmap.set(%d), file index not found in map\n", p.FileIndex()))
+ }
+ s.set(p.Line(), v)
+}
+
+// get returns the int32 associated with the file index and line of p.
+func (m *xposmap) get(p src.XPos) int32 {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return -1
+ }
+ return s.get(p.Line())
+}
+
+// add adds p to m, treating m as a set instead of as a map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+// Use clear() in between set/map interpretations of m.
+func (m *xposmap) add(p src.XPos) {
+ m.set(p, 0)
+}
+
+// contains returns whether the file index and line of p are in m,
+// treating m as a set instead of as a map.
+func (m *xposmap) contains(p src.XPos) bool {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return false
+ }
+ return s.contains(p.Line())
+}
+
+// remove removes the file index and line for p from m,
+// whether m is currently treated as a map or set.
+func (m *xposmap) remove(p src.XPos) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return
+ }
+ s.remove(p.Line())
+}
+
+// foreachEntry applies f to each (fileindex, line, value) triple in m.
+func (m *xposmap) foreachEntry(f func(j int32, l uint, v int32)) {
+ for j, mm := range m.maps {
+ s := mm.size()
+ for i := 0; i < s; i++ {
+ l, v := mm.getEntry(i)
+ f(j, l, v)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go
new file mode 100644
index 0000000..ec38b7d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zcse.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+// zcse does an initial pass of common-subexpression elimination on the
+// function for values with zero arguments to allow the more expensive cse
+// to begin with a reduced number of values. Values are just relinked,
+// nothing is deleted. A subsequent deadcode pass is required to actually
+// remove duplicate expressions.
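+//
+// For example, if two blocks each contain an OpConst64 with AuxInt 42, zcse
+// moves the first one into the entry block and redirects argument uses of the
+// second to it; the now-unused duplicate is left for deadcode to delete.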
+func zcse(f *Func) {
+ vals := make(map[vkey]*Value)
+
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if opcodeTable[v.Op].argLen == 0 {
+ key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
+ if vals[key] == nil {
+ vals[key] = v
+ if b != f.Entry {
+ // Move v to the entry block so it will dominate every block
+ // where we might use it. This prevents the need for any dominator
+ // calculations in this pass.
+ v.Block = f.Entry
+ f.Entry.Values = append(f.Entry.Values, v)
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+
+ i-- // process b.Values[i] again
+ }
+ }
+ }
+ }
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if opcodeTable[a.Op].argLen == 0 {
+ key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
+ if rv, ok := vals[key]; ok {
+ v.SetArg(i, rv)
+ }
+ }
+ }
+ }
+ }
+}
+
+// vkey is a type used to uniquely identify a zero arg value.
+type vkey struct {
+ op Op
+ ai int64 // aux int
+ ax interface{} // aux
+ t *types.Type // type
+}
+
+// keyFor returns the AuxInt portion of a key structure uniquely identifying a
+// zero arg value for the supported ops.
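+// For example, two OpConst32 values whose AuxInts differ only in the upper 32
+// bits (say 0xffffffff and -1) normalize to the same key, -1, and are thus
+// recognized as the same constant (a hypothetical pair; canonical AuxInts are
+// already sign-extended).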
+func keyFor(v *Value) int64 {
+ switch v.Op {
+ case OpConst64, OpConst64F, OpConst32F:
+ return v.AuxInt
+ case OpConst32:
+ return int64(int32(v.AuxInt))
+ case OpConst16:
+ return int64(int16(v.AuxInt))
+ case OpConst8, OpConstBool:
+ return int64(int8(v.AuxInt))
+ default:
+ return v.AuxInt
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zeroextension_test.go b/src/cmd/compile/internal/ssa/zeroextension_test.go
new file mode 100644
index 0000000..2e31621
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zeroextension_test.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+type extTest struct {
+ f func(uint64, uint64) uint64
+ arg1 uint64
+ arg2 uint64
+ res uint64
+ name string
+}
+
+var extTests = [...]extTest{
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 / op2)) }, arg1: 0x1, arg2: 0xfffffffeffffffff, res: 0xffffffff, name: "div"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 * op2)) }, arg1: 0x1, arg2: 0x100000001, res: 0x1, name: "mul"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 + op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x0, name: "add"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 - op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x2, name: "sub"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 | op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xffffffff, name: "or"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 ^ op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xfffffffe, name: "xor"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 & op2)) }, arg1: 0x100000000000001, arg2: 0x100000000000001, res: 0x1, name: "and"},
+}
+
+func TestZeroExtension(t *testing.T) {
+ for _, x := range extTests {
+ r := x.f(x.arg1, x.arg2)
+ if x.res != r {
+ t.Errorf("%s: got %d want %d", x.name, r, x.res)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
new file mode 100644
index 0000000..56e97c7
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -0,0 +1,311 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// TODO(gri) consider making this part of the parser code
+
+// checkBranches checks correct use of labels and branch
+// statements (break, continue, goto) in a function body.
+// It catches:
+// - misplaced breaks and continues
+// - bad labeled breaks and continues
+// - invalid, unused, duplicate, and missing labels
+// - gotos jumping over variable declarations and into blocks
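+//
+// For example, in a function body containing
+//
+//	goto L
+//	x := 1
+//	L:
+//	println(x)
+//
+// checkBranches reports that the goto jumps over the declaration of x.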
+func checkBranches(body *BlockStmt, errh ErrorHandler) {
+ if body == nil {
+ return
+ }
+
+ // scope of all labels in this body
+ ls := &labelScope{errh: errh}
+ fwdGotos := ls.blockBranches(nil, targets{}, nil, body.Pos(), body.List)
+
+ // If there are any forward gotos left, no matching label was
+ // found for them. Either those labels were never defined, or
+ // they are inside blocks and not reachable from the gotos.
+ for _, fwd := range fwdGotos {
+ name := fwd.Label.Value
+ if l := ls.labels[name]; l != nil {
+ l.used = true // avoid "defined and not used" error
+ ls.err(fwd.Label.Pos(), "goto %s jumps into block starting at %s", name, l.parent.start)
+ } else {
+ ls.err(fwd.Label.Pos(), "label %s not defined", name)
+ }
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for _, l := range ls.labels {
+ if !l.used {
+ l := l.lstmt.Label
+ ls.err(l.Pos(), "label %s defined and not used", l.Value)
+ }
+ }
+}
+
+type labelScope struct {
+ errh ErrorHandler
+ labels map[string]*label // all label declarations inside the function; allocated lazily
+}
+
+type label struct {
+ parent *block // block containing this label declaration
+ lstmt *LabeledStmt // statement declaring the label
+ used bool // whether the label is used or not
+}
+
+type block struct {
+ parent *block // immediately enclosing block, or nil
+ start Pos // start of block
+ lstmt *LabeledStmt // labeled statement associated with this block, or nil
+}
+
+func (ls *labelScope) err(pos Pos, format string, args ...interface{}) {
+ ls.errh(Error{pos, fmt.Sprintf(format, args...)})
+}
+
+// declare declares the label introduced by s in block b and returns
+// the new label. If the label was already declared, declare reports
+// an error and returns the existing label instead.
+func (ls *labelScope) declare(b *block, s *LabeledStmt) *label {
+ name := s.Label.Value
+ labels := ls.labels
+ if labels == nil {
+ labels = make(map[string]*label)
+ ls.labels = labels
+ } else if alt := labels[name]; alt != nil {
+ ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String())
+ return alt
+ }
+ l := &label{b, s, false}
+ labels[name] = l
+ return l
+}
+
+// gotoTarget returns the labeled statement matching the given name and
+// declared in block b or any of its enclosing blocks. The result is nil
+// if the label is not defined, or doesn't match a valid labeled statement.
+func (ls *labelScope) gotoTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target
+ for ; b != nil; b = b.parent {
+ if l.parent == b {
+ return l.lstmt
+ }
+ }
+ }
+ return nil
+}
+
+var invalid = new(LabeledStmt) // singleton to signal invalid enclosing target
+
+// enclosingTarget returns the innermost enclosing labeled statement matching
+// the given name. The result is nil if the label is not defined, and invalid
+// if the label is defined but doesn't label a valid labeled statement.
+func (ls *labelScope) enclosingTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target (see e.g., test/fixedbugs/bug136.go)
+ for ; b != nil; b = b.parent {
+ if l.lstmt == b.lstmt {
+ return l.lstmt
+ }
+ }
+ return invalid
+ }
+ return nil
+}
+
+// targets describes the target statements within which break
+// or continue statements are valid.
+type targets struct {
+ breaks Stmt // *ForStmt, *SwitchStmt, *SelectStmt, or nil
+ continues *ForStmt // or nil
+}
+
+// blockBranches processes a block's body starting at start and returns the
+// list of unresolved (forward) gotos. parent is the immediately enclosing
+// block (or nil), ctxt provides information about the enclosing statements,
+// and lstmt is the labeled statement associated with this block, or nil.
+func (ls *labelScope) blockBranches(parent *block, ctxt targets, lstmt *LabeledStmt, start Pos, body []Stmt) []*BranchStmt {
+ b := &block{parent: parent, start: start, lstmt: lstmt}
+
+ var varPos Pos
+ var varName Expr
+ var fwdGotos, badGotos []*BranchStmt
+
+ recordVarDecl := func(pos Pos, name Expr) {
+ varPos = pos
+ varName = name
+ // Any existing forward goto jumping over the variable
+ // declaration is invalid. The goto may still jump out
+ // of the block and be ok, but we don't know that yet.
+ // Remember all forward gotos as potential bad gotos.
+ badGotos = append(badGotos[:0], fwdGotos...)
+ }
+
+ jumpsOverVarDecl := func(fwd *BranchStmt) bool {
+ if varPos.IsKnown() {
+ for _, bad := range badGotos {
+ if fwd == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ innerBlock := func(ctxt targets, start Pos, body []Stmt) {
+ // Unresolved forward gotos from the inner block
+ // become forward gotos for the current block.
+ fwdGotos = append(fwdGotos, ls.blockBranches(b, ctxt, lstmt, start, body)...)
+ }
+
+ for _, stmt := range body {
+ lstmt = nil
+ L:
+ switch s := stmt.(type) {
+ case *DeclStmt:
+ for _, d := range s.DeclList {
+ if v, ok := d.(*VarDecl); ok {
+ recordVarDecl(v.Pos(), v.NameList[0])
+ break // the first VarDecl will do
+ }
+ }
+
+ case *LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Value; name != "_" {
+ l := ls.declare(b, s)
+ // resolve matching forward gotos
+ i := 0
+ for _, fwd := range fwdGotos {
+ if fwd.Label.Value == name {
+ fwd.Target = s
+ l.used = true
+ if jumpsOverVarDecl(fwd) {
+ ls.err(
+ fwd.Label.Pos(),
+ "goto %s jumps over declaration of %s at %s",
+ name, String(varName), varPos,
+ )
+ }
+ } else {
+ // no match - keep forward goto
+ fwdGotos[i] = fwd
+ i++
+ }
+ }
+ fwdGotos = fwdGotos[:i]
+ lstmt = s
+ }
+ // process labeled statement
+ stmt = s.Stmt
+ goto L
+
+ case *BranchStmt:
+ // unlabeled branch statement
+ if s.Label == nil {
+ switch s.Tok {
+ case _Break:
+ if t := ctxt.breaks; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "break is not in a loop, switch, or select")
+ }
+ case _Continue:
+ if t := ctxt.continues; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "continue is not in a loop")
+ }
+ case _Fallthrough:
+ // nothing to do
+ case _Goto:
+ fallthrough // should always have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+ break
+ }
+
+ // labeled branch statement
+ name := s.Label.Value
+ switch s.Tok {
+ case _Break:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ switch t := t.Stmt.(type) {
+ case *SwitchStmt, *SelectStmt, *ForStmt:
+ s.Target = t
+ default:
+ ls.err(s.Label.Pos(), "invalid break label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "break label not defined: %s", name)
+ }
+
+ case _Continue:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ if t, ok := t.Stmt.(*ForStmt); ok {
+ s.Target = t
+ } else {
+ ls.err(s.Label.Pos(), "invalid continue label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "continue label not defined: %s", name)
+ }
+
+ case _Goto:
+ if t := ls.gotoTarget(b, name); t != nil {
+ s.Target = t
+ } else {
+ // label may be declared later - add goto to forward gotos
+ fwdGotos = append(fwdGotos, s)
+ }
+
+ case _Fallthrough:
+ fallthrough // should never have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+
+ case *AssignStmt:
+ if s.Op == Def {
+ recordVarDecl(s.Pos(), s.Lhs)
+ }
+
+ case *BlockStmt:
+ innerBlock(ctxt, s.Pos(), s.List)
+
+ case *IfStmt:
+ innerBlock(ctxt, s.Then.Pos(), s.Then.List)
+ if s.Else != nil {
+ innerBlock(ctxt, s.Else.Pos(), []Stmt{s.Else})
+ }
+
+ case *ForStmt:
+ innerBlock(targets{s, s}, s.Body.Pos(), s.Body.List)
+
+ case *SwitchStmt:
+ inner := targets{s, ctxt.continues}
+ for _, cc := range s.Body {
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+
+ case *SelectStmt:
+ inner := targets{s, ctxt.continues}
+ for _, cc := range s.Body {
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+ }
+ }
+
+ return fwdGotos
+}
diff --git a/src/cmd/compile/internal/syntax/dumper.go b/src/cmd/compile/internal/syntax/dumper.go
new file mode 100644
index 0000000..01453d5
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper.go
@@ -0,0 +1,212 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax tree structures.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Fdump dumps the structure of the syntax tree rooted at n to w.
+// It is intended for debugging purposes; no specific output format
+// is guaranteed.
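+//
+// A typical debugging call (os.Stdout as the destination is illustrative):
+//
+//	if f, err := ParseFile("x.go", nil, nil, 0); err == nil {
+//		Fdump(os.Stdout, f)
+//	}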
+func Fdump(w io.Writer, n Node) (err error) {
+ p := dumper{
+ output: w,
+ ptrmap: make(map[Node]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(localError).err // re-panics if it's not a localError
+ }
+ }()
+
+ if n == nil {
+ p.printf("nil\n")
+ return
+ }
+ p.dump(reflect.ValueOf(n), n)
+ p.printf("\n")
+
+ return
+}
+
+type dumper struct {
+ output io.Writer
+ ptrmap map[Node]int // node -> dump line number
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// localError wraps locally caught errors so we can distinguish
+// them from genuine panics which we don't want to return as errors.
+type localError struct {
+ err error
+}
+
+// printf is a convenience wrapper that takes care of print errors.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(localError{err})
+ }
+}
+
+// dump prints the contents of x.
+// If x is the reflect.Value of a struct s, where &s
+// implements Node, then &s should be passed for n -
+// this permits printing of the unexported span and
+// comments fields of the embedded isNode field by
+// calling the Span() and Comment() methods instead of using
+// reflection.
+func (p *dumper) dump(x reflect.Value, n Node) {
+ switch x.Kind() {
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), nil)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ // special cases for identifiers w/o attached comments (common case)
+ if x, ok := x.Interface().(*Name); ok {
+ p.printf("%s @ %v", x.Value, x.Pos())
+ return
+ }
+
+ p.printf("*")
+ // Fields may share type expressions, and declarations
+ // may share the same group - use ptrmap to keep track
+ // of nodes that have been printed already.
+ if ptr, ok := x.Interface().(Node); ok {
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(Node @ %d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ n = ptr
+ }
+ p.dump(x.Elem(), n)
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), nil)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ // if span, ok := x.Interface().(lexical.Span); ok {
+ // p.printf("%s", &span)
+ // return
+ // }
+
+ p.printf("%s {", typ)
+ p.indent++
+
+ first := true
+ if n != nil {
+ p.printf("\n")
+ first = false
+ // p.printf("Span: %s\n", n.Span())
+ // if c := *n.Comments(); c != nil {
+ // p.printf("Comments: ")
+ // p.dump(reflect.ValueOf(c), nil) // a Comment is not a Node
+ // p.printf("\n")
+ // }
+ }
+
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; isExported(name) {
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x.Field(i), nil)
+ p.printf("\n")
+ }
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ switch x := x.Interface().(type) {
+ case string:
+ // print strings in quotes
+ p.printf("%q", x)
+ default:
+ p.printf("%v", x)
+ }
+ }
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go
new file mode 100644
index 0000000..f84bd2d
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper_test.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "testing"
+)
+
+func TestDump(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ // provide a dummy error handler so parsing doesn't stop after the first error
+ ast, err := ParseFile(*src_, func(error) {}, nil, CheckBranches)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if ast != nil {
+ Fdump(testOut(), ast)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/error_test.go b/src/cmd/compile/internal/syntax/error_test.go
new file mode 100644
index 0000000..72b1ad6
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/error_test.go
@@ -0,0 +1,191 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a regression test harness for syntax errors.
+// The files in the testdata directory are parsed and the reported
+// errors are compared against the errors declared in those files.
+//
+// Errors are declared in place in the form of "error comments",
+// just before (or on the same line as) the offending token.
+//
+// Error comments must be of the form // ERROR rx or /* ERROR rx */
+// where rx is a regular expression that matches the reported error
+// message. The rx text comprises the comment text after "ERROR ",
+// with any white space around it stripped.
+//
+// If the line comment form is used, the reported error's line must
+// match the line of the error comment.
+//
+// If the regular comment form is used, the reported error's position
+// must match the position of the token immediately following the
+// error comment. Thus, /* ERROR ... */ comments should appear
+// immediately before the position where the error is reported.
+//
+// Currently, the test harness only supports one error comment per
+// token. If multiple error comments appear before a token, only
+// the last one is considered.
+
+package syntax
+
+import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+)
+
+const testdata = "testdata" // directory containing test files
+
+var print = flag.Bool("print", false, "only print errors")
+
+// A position represents a source position in the current file.
+type position struct {
+ line, col uint
+}
+
+func (pos position) String() string {
+ return fmt.Sprintf("%d:%d", pos.line, pos.col)
+}
+
+func sortedPositions(m map[position]string) []position {
+ list := make([]position, len(m))
+ i := 0
+ for pos := range m {
+ list[i] = pos
+ i++
+ }
+ sort.Slice(list, func(i, j int) bool {
+ a, b := list[i], list[j]
+ return a.line < b.line || a.line == b.line && a.col < b.col
+ })
+ return list
+}
+
+// declaredErrors returns a map of source positions to error
+// patterns, extracted from error comments in the given file.
+// Error comments in the form of line comments use col = 0
+// in their position.
+func declaredErrors(t *testing.T, filename string) map[position]string {
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ declared := make(map[position]string)
+
+ var s scanner
+ var pattern string
+ s.init(f, func(line, col uint, msg string) {
+ // errors never start with '/' so they are automatically excluded here
+ switch {
+ case strings.HasPrefix(msg, "// ERROR "):
+ // we can't have another comment on the same line - just add it
+ declared[position{s.line, 0}] = strings.TrimSpace(msg[9:])
+ case strings.HasPrefix(msg, "/* ERROR "):
+ // we may have more comments before the next token - collect them
+ pattern = strings.TrimSpace(msg[9 : len(msg)-2])
+ }
+ }, comments)
+
+ // consume file
+ for {
+ s.next()
+ if pattern != "" {
+ declared[position{s.line, s.col}] = pattern
+ pattern = ""
+ }
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ return declared
+}
+
+func testSyntaxErrors(t *testing.T, filename string) {
+ declared := declaredErrors(t, filename)
+ if *print {
+ fmt.Println("Declared errors:")
+ for _, pos := range sortedPositions(declared) {
+ fmt.Printf("%s:%s: %s\n", filename, pos, declared[pos])
+ }
+
+ fmt.Println()
+ fmt.Println("Reported errors:")
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ ParseFile(filename, func(err error) {
+ e, ok := err.(Error)
+ if !ok {
+ return
+ }
+
+ if *print {
+ fmt.Println(err)
+ return
+ }
+
+ orig := position{e.Pos.Line(), e.Pos.Col()}
+ pos := orig
+ pattern, found := declared[pos]
+ if !found {
+ // try line comment (only line must match)
+ pos = position{e.Pos.Line(), 0}
+ pattern, found = declared[pos]
+ }
+ if found {
+ rx, err := regexp.Compile(pattern)
+ if err != nil {
+ t.Errorf("%s: %v", pos, err)
+ return
+ }
+ if match := rx.MatchString(e.Msg); !match {
+ t.Errorf("%s: %q does not match %q", pos, e.Msg, pattern)
+ return
+ }
+ // we have a match - eliminate this error
+ delete(declared, pos)
+ } else {
+ t.Errorf("%s: unexpected error: %s", orig, e.Msg)
+ }
+ }, nil, 0)
+
+ if *print {
+ fmt.Println()
+ return // we're done
+ }
+
+ // report expected but not reported errors
+ for pos, pattern := range declared {
+ t.Errorf("%s: missing error: %s", pos, pattern)
+ }
+}
+
+func TestSyntaxErrors(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // we need access to source (testdata)
+
+ list, err := ioutil.ReadDir(testdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") {
+ testSyntaxErrors(t, filepath.Join(testdata, name))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
new file mode 100644
index 0000000..815630f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -0,0 +1,469 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// ----------------------------------------------------------------------------
+// Nodes
+
+type Node interface {
+ // Pos() returns the position associated with the node as follows:
+ // 1) The position of a node representing a terminal syntax production
+ // (Name, BasicLit, etc.) is the position of the respective production
+ // in the source.
+ // 2) The position of a node representing a non-terminal production
+ // (IndexExpr, IfStmt, etc.) is the position of a token uniquely
+ // associated with that production; usually the left-most one
+ // ('[' for IndexExpr, 'if' for IfStmt, etc.)
+ Pos() Pos
+ aNode()
+}
+
+type node struct {
+ // commented out for now since not yet used
+ // doc *Comment // nil means no comment(s) attached
+ pos Pos
+}
+
+func (n *node) Pos() Pos { return n.pos }
+func (*node) aNode() {}
+
+// ----------------------------------------------------------------------------
+// Files
+
+// package PkgName; DeclList[0], DeclList[1], ...
+type File struct {
+ Pragma Pragma
+ PkgName *Name
+ DeclList []Decl
+ Lines uint
+ node
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type (
+ Decl interface {
+ Node
+ aDecl()
+ }
+
+ // Path
+ // LocalPkgName Path
+ ImportDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ LocalPkgName *Name // including "."; nil means no rename present
+ Path *BasicLit
+ decl
+ }
+
+ // NameList
+ // NameList = Values
+ // NameList Type = Values
+ ConstDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // Name Type
+ TypeDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ Name *Name
+ Alias bool
+ Type Expr
+ decl
+ }
+
+ // NameList Type
+ // NameList Type = Values
+ // NameList = Values
+ VarDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // func Name Type { Body }
+ // func Name Type
+ // func Receiver Name Type { Body }
+ // func Receiver Name Type
+ FuncDecl struct {
+ Pragma Pragma
+ Recv *Field // nil means regular function
+ Name *Name
+ Type *FuncType
+ Body *BlockStmt // nil means no body (forward declaration)
+ decl
+ }
+)
+
+type decl struct{ node }
+
+func (*decl) aDecl() {}
+
+// All declarations belonging to the same group point to the same Group node.
+type Group struct {
+ dummy int // not empty so we are guaranteed different Group instances
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+type (
+ Expr interface {
+ Node
+ aExpr()
+ }
+
+ // Placeholder for an expression that failed to parse
+ // correctly and where we can't provide a better node.
+ BadExpr struct {
+ expr
+ }
+
+ // Value
+ Name struct {
+ Value string
+ expr
+ }
+
+ // Value
+ BasicLit struct {
+ Value string
+ Kind LitKind
+ Bad bool // true means the literal Value has syntax errors
+ expr
+ }
+
+ // Type { ElemList[0], ElemList[1], ... }
+ CompositeLit struct {
+ Type Expr // nil means no literal type
+ ElemList []Expr
+ NKeys int // number of elements with keys
+ Rbrace Pos
+ expr
+ }
+
+ // Key: Value
+ KeyValueExpr struct {
+ Key, Value Expr
+ expr
+ }
+
+ // func Type { Body }
+ FuncLit struct {
+ Type *FuncType
+ Body *BlockStmt
+ expr
+ }
+
+ // (X)
+ ParenExpr struct {
+ X Expr
+ expr
+ }
+
+ // X.Sel
+ SelectorExpr struct {
+ X Expr
+ Sel *Name
+ expr
+ }
+
+ // X[Index]
+ IndexExpr struct {
+ X Expr
+ Index Expr
+ expr
+ }
+
+ // X[Index[0] : Index[1] : Index[2]]
+ SliceExpr struct {
+ X Expr
+ Index [3]Expr
+ // Full indicates whether this is a simple or full slice expression.
+ // In a valid AST, this is equivalent to Index[2] != nil.
+ // TODO(mdempsky): This is only needed to report the "3-index
+ // slice of string" error when Index[2] is missing.
+ Full bool
+ expr
+ }
+
+ // X.(Type)
+ AssertExpr struct {
+ X Expr
+ Type Expr
+ expr
+ }
+
+ // X.(type)
+ // Lhs := X.(type)
+ TypeSwitchGuard struct {
+ Lhs *Name // nil means no Lhs :=
+ X Expr // X.(type)
+ expr
+ }
+
+ Operation struct {
+ Op Operator
+ X, Y Expr // Y == nil means unary expression
+ expr
+ }
+
+ // Fun(ArgList[0], ArgList[1], ...)
+ CallExpr struct {
+ Fun Expr
+ ArgList []Expr // nil means no arguments
+ HasDots bool // last argument is followed by ...
+ expr
+ }
+
+ // ElemList[0], ElemList[1], ...
+ ListExpr struct {
+ ElemList []Expr
+ expr
+ }
+
+ // [Len]Elem
+ ArrayType struct {
+ // TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
+ Len Expr // nil means Len is ...
+ Elem Expr
+ expr
+ }
+
+ // []Elem
+ SliceType struct {
+ Elem Expr
+ expr
+ }
+
+ // ...Elem
+ DotsType struct {
+ Elem Expr
+ expr
+ }
+
+ // struct { FieldList[0] TagList[0]; FieldList[1] TagList[1]; ... }
+ StructType struct {
+ FieldList []*Field
+ TagList []*BasicLit // i >= len(TagList) || TagList[i] == nil means no tag for field i
+ expr
+ }
+
+ // Name Type
+ // Type
+ Field struct {
+ Name *Name // nil means anonymous field/parameter (structs/parameters), or embedded interface (interfaces)
+ Type Expr // field names declared in a list share the same Type (identical pointers)
+ node
+ }
+
+ // interface { MethodList[0]; MethodList[1]; ... }
+ InterfaceType struct {
+ MethodList []*Field
+ expr
+ }
+
+ FuncType struct {
+ ParamList []*Field
+ ResultList []*Field
+ expr
+ }
+
+ // map[Key]Value
+ MapType struct {
+ Key, Value Expr
+ expr
+ }
+
+ // chan Elem
+ // <-chan Elem
+ // chan<- Elem
+ ChanType struct {
+ Dir ChanDir // 0 means no direction
+ Elem Expr
+ expr
+ }
+)
+
+type expr struct{ node }
+
+func (*expr) aExpr() {}
+
+type ChanDir uint
+
+const (
+ _ ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// ----------------------------------------------------------------------------
+// Statements
+
+type (
+ Stmt interface {
+ Node
+ aStmt()
+ }
+
+ SimpleStmt interface {
+ Stmt
+ aSimpleStmt()
+ }
+
+ EmptyStmt struct {
+ simpleStmt
+ }
+
+ LabeledStmt struct {
+ Label *Name
+ Stmt Stmt
+ stmt
+ }
+
+ BlockStmt struct {
+ List []Stmt
+ Rbrace Pos
+ stmt
+ }
+
+ ExprStmt struct {
+ X Expr
+ simpleStmt
+ }
+
+ SendStmt struct {
+ Chan, Value Expr // Chan <- Value
+ simpleStmt
+ }
+
+ DeclStmt struct {
+ DeclList []Decl
+ stmt
+ }
+
+ AssignStmt struct {
+ Op Operator // 0 means no operation
+ Lhs, Rhs Expr // Rhs == ImplicitOne means Lhs++ (Op == Add) or Lhs-- (Op == Sub)
+ simpleStmt
+ }
+
+ BranchStmt struct {
+ Tok token // Break, Continue, Fallthrough, or Goto
+ Label *Name
+ // Target is the continuation of the control flow after executing
+ // the branch; it is computed by the parser if CheckBranches is set.
+ // Target is a *LabeledStmt for gotos, and a *SwitchStmt, *SelectStmt,
+ // or *ForStmt for breaks and continues, depending on the context of
+ // the branch. Target is not set for fallthroughs.
+ Target Stmt
+ stmt
+ }
+
+ CallStmt struct {
+ Tok token // Go or Defer
+ Call *CallExpr
+ stmt
+ }
+
+ ReturnStmt struct {
+ Results Expr // nil means no explicit return values
+ stmt
+ }
+
+ IfStmt struct {
+ Init SimpleStmt
+ Cond Expr
+ Then *BlockStmt
+ Else Stmt // either nil, *IfStmt, or *BlockStmt
+ stmt
+ }
+
+ ForStmt struct {
+ Init SimpleStmt // incl. *RangeClause
+ Cond Expr
+ Post SimpleStmt
+ Body *BlockStmt
+ stmt
+ }
+
+ SwitchStmt struct {
+ Init SimpleStmt
+ Tag Expr // incl. *TypeSwitchGuard
+ Body []*CaseClause
+ Rbrace Pos
+ stmt
+ }
+
+ SelectStmt struct {
+ Body []*CommClause
+ Rbrace Pos
+ stmt
+ }
+)
+
+type (
+ RangeClause struct {
+ Lhs Expr // nil means no Lhs = or Lhs :=
+ Def bool // means :=
+ X Expr // range X
+ simpleStmt
+ }
+
+ CaseClause struct {
+ Cases Expr // nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+
+ CommClause struct {
+ Comm SimpleStmt // send or receive stmt; nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+)
+
+type stmt struct{ node }
+
+func (stmt) aStmt() {}
+
+type simpleStmt struct {
+ stmt
+}
+
+func (simpleStmt) aSimpleStmt() {}
+
+// ----------------------------------------------------------------------------
+// Comments
+
+// TODO(gri) Consider renaming to CommentPos, CommentPlacement, etc.
+// Kind = Above doesn't make much sense.
+type CommentKind uint
+
+const (
+ Above CommentKind = iota
+ Below
+ Left
+ Right
+)
+
+type Comment struct {
+ Kind CommentKind
+ Text string
+ Next *Comment
+}
diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go
new file mode 100644
index 0000000..a39f08c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes_test.go
@@ -0,0 +1,329 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+// A test is a source code snippet of a particular node type.
+// In the snippet, a '@' indicates the position recorded by
+// the parser when creating the respective node.
+type test struct {
+ nodetyp string
+ snippet string
+}
+
+var decls = []test{
+ // The position of declarations is always the
+ // position of the first token of an individual
+ // declaration, independent of grouping.
+ {"ImportDecl", `import @"math"`},
+ {"ImportDecl", `import @mymath "math"`},
+ {"ImportDecl", `import @. "math"`},
+ {"ImportDecl", `import (@"math")`},
+ {"ImportDecl", `import (@mymath "math")`},
+ {"ImportDecl", `import (@. "math")`},
+
+ {"ConstDecl", `const @x`},
+ {"ConstDecl", `const @x = 0`},
+ {"ConstDecl", `const @x, y, z = 0, 1, 2`},
+ {"ConstDecl", `const (@x)`},
+ {"ConstDecl", `const (@x = 0)`},
+ {"ConstDecl", `const (@x, y, z = 0, 1, 2)`},
+
+ {"TypeDecl", `type @T int`},
+ {"TypeDecl", `type @T = int`},
+ {"TypeDecl", `type (@T int)`},
+ {"TypeDecl", `type (@T = int)`},
+
+ {"VarDecl", `var @x int`},
+ {"VarDecl", `var @x, y, z int`},
+ {"VarDecl", `var @x int = 0`},
+ {"VarDecl", `var @x, y, z int = 1, 2, 3`},
+ {"VarDecl", `var @x = 0`},
+ {"VarDecl", `var @x, y, z = 1, 2, 3`},
+ {"VarDecl", `var (@x int)`},
+ {"VarDecl", `var (@x, y, z int)`},
+ {"VarDecl", `var (@x int = 0)`},
+ {"VarDecl", `var (@x, y, z int = 1, 2, 3)`},
+ {"VarDecl", `var (@x = 0)`},
+ {"VarDecl", `var (@x, y, z = 1, 2, 3)`},
+
+ {"FuncDecl", `func @f() {}`},
+ {"FuncDecl", `func @(T) f() {}`},
+ {"FuncDecl", `func @(x T) f() {}`},
+}
+
+var exprs = []test{
+ // The position of an expression is the position
+ // of the left-most token that identifies the
+ // kind of expression.
+ {"Name", `@x`},
+
+ {"BasicLit", `@0`},
+ {"BasicLit", `@0x123`},
+ {"BasicLit", `@3.1415`},
+ {"BasicLit", `@.2718`},
+ {"BasicLit", `@1i`},
+ {"BasicLit", `@'a'`},
+ {"BasicLit", `@"abc"`},
+ {"BasicLit", "@`abc`"},
+
+ {"CompositeLit", `@{}`},
+ {"CompositeLit", `T@{}`},
+ {"CompositeLit", `struct{x, y int}@{}`},
+
+ {"KeyValueExpr", `"foo"@: true`},
+ {"KeyValueExpr", `"a"@: b`},
+
+ {"FuncLit", `@func (){}`},
+ {"ParenExpr", `@(x)`},
+ {"SelectorExpr", `a@.b`},
+ {"IndexExpr", `a@[i]`},
+
+ {"SliceExpr", `a@[:]`},
+ {"SliceExpr", `a@[i:]`},
+ {"SliceExpr", `a@[:j]`},
+ {"SliceExpr", `a@[i:j]`},
+ {"SliceExpr", `a@[i:j:k]`},
+
+ {"AssertExpr", `x@.(T)`},
+
+ {"Operation", `@*b`},
+ {"Operation", `@+b`},
+ {"Operation", `@-b`},
+ {"Operation", `@!b`},
+ {"Operation", `@^b`},
+ {"Operation", `@&b`},
+ {"Operation", `@<-b`},
+
+ {"Operation", `a @|| b`},
+ {"Operation", `a @&& b`},
+ {"Operation", `a @== b`},
+ {"Operation", `a @+ b`},
+ {"Operation", `a @* b`},
+
+ {"CallExpr", `f@()`},
+ {"CallExpr", `f@(x, y, z)`},
+ {"CallExpr", `obj.f@(1, 2, 3)`},
+ {"CallExpr", `func(x int) int { return x + 1 }@(y)`},
+
+ // ListExpr: tested via multi-value const/var declarations
+}
+
+var types = []test{
+ {"Operation", `@*T`},
+ {"Operation", `@*struct{}`},
+
+ {"ArrayType", `@[10]T`},
+ {"ArrayType", `@[...]T`},
+
+ {"SliceType", `@[]T`},
+ {"DotsType", `@...T`},
+ {"StructType", `@struct{}`},
+ {"InterfaceType", `@interface{}`},
+ {"FuncType", `func@()`},
+ {"MapType", `@map[T]T`},
+
+ {"ChanType", `@chan T`},
+ {"ChanType", `@chan<- T`},
+ {"ChanType", `@<-chan T`},
+}
+
+var fields = []test{
+ {"Field", `@T`},
+ {"Field", `@(T)`},
+ {"Field", `@x T`},
+ {"Field", `@x *(T)`},
+ {"Field", `@x, y, z T`},
+ {"Field", `@x, y, z (*T)`},
+}
+
+var stmts = []test{
+ {"EmptyStmt", `@`},
+
+ {"LabeledStmt", `L@:`},
+ {"LabeledStmt", `L@: ;`},
+ {"LabeledStmt", `L@: f()`},
+
+ {"BlockStmt", `@{}`},
+
+ // The position of an ExprStmt is the position of the expression.
+ {"ExprStmt", `@<-ch`},
+ {"ExprStmt", `f@()`},
+ {"ExprStmt", `append@(s, 1, 2, 3)`},
+
+ {"SendStmt", `ch @<- x`},
+
+ {"DeclStmt", `@const x = 0`},
+ {"DeclStmt", `@const (x = 0)`},
+ {"DeclStmt", `@type T int`},
+ {"DeclStmt", `@type T = int`},
+ {"DeclStmt", `@type (T1 = int; T2 = float32)`},
+ {"DeclStmt", `@var x = 0`},
+ {"DeclStmt", `@var x, y, z int`},
+ {"DeclStmt", `@var (a, b = 1, 2)`},
+
+ {"AssignStmt", `x @= y`},
+ {"AssignStmt", `a, b, x @= 1, 2, 3`},
+ {"AssignStmt", `x @+= y`},
+ {"AssignStmt", `x @:= y`},
+ {"AssignStmt", `x, ok @:= f()`},
+ {"AssignStmt", `x@++`},
+ {"AssignStmt", `a[i]@--`},
+
+ {"BranchStmt", `@break`},
+ {"BranchStmt", `@break L`},
+ {"BranchStmt", `@continue`},
+ {"BranchStmt", `@continue L`},
+ {"BranchStmt", `@fallthrough`},
+ {"BranchStmt", `@goto L`},
+
+ {"CallStmt", `@defer f()`},
+ {"CallStmt", `@go f()`},
+
+ {"ReturnStmt", `@return`},
+ {"ReturnStmt", `@return x`},
+ {"ReturnStmt", `@return a, b, a + b*f(1, 2, 3)`},
+
+ {"IfStmt", `@if cond {}`},
+ {"IfStmt", `@if cond { f() } else {}`},
+ {"IfStmt", `@if cond { f() } else { g(); h() }`},
+ {"ForStmt", `@for {}`},
+ {"ForStmt", `@for { f() }`},
+ {"SwitchStmt", `@switch {}`},
+ {"SwitchStmt", `@switch { default: }`},
+ {"SwitchStmt", `@switch { default: x++ }`},
+ {"SelectStmt", `@select {}`},
+ {"SelectStmt", `@select { default: }`},
+ {"SelectStmt", `@select { default: ch <- false }`},
+}
+
+var ranges = []test{
+ {"RangeClause", `@range s`},
+ {"RangeClause", `i = @range s`},
+ {"RangeClause", `i := @range s`},
+ {"RangeClause", `_, x = @range s`},
+ {"RangeClause", `i, x = @range s`},
+ {"RangeClause", `_, x := @range s.f`},
+ {"RangeClause", `i, x := @range f(i)`},
+}
+
+var guards = []test{
+ {"TypeSwitchGuard", `x@.(type)`},
+ {"TypeSwitchGuard", `x := x@.(type)`},
+}
+
+var cases = []test{
+ {"CaseClause", `@case x:`},
+ {"CaseClause", `@case x, y, z:`},
+ {"CaseClause", `@case x == 1, y == 2:`},
+ {"CaseClause", `@default:`},
+}
+
+var comms = []test{
+ {"CommClause", `@case <-ch:`},
+ {"CommClause", `@case x <- ch:`},
+ {"CommClause", `@case x = <-ch:`},
+ {"CommClause", `@case x := <-ch:`},
+ {"CommClause", `@case x, ok = <-ch: f(1, 2, 3)`},
+ {"CommClause", `@case x, ok := <-ch: x++`},
+ {"CommClause", `@default:`},
+ {"CommClause", `@default: ch <- true`},
+}
+
+func TestPos(t *testing.T) {
+ // TODO(gri) Once we have a general tree walker, we can use it to find
+ // the first occurrence of the respective node rather than hand-extracting
+ // the node for each specific kind of construct.
+
+ testPos(t, decls, "package p; ", "",
+ func(f *File) Node { return f.DeclList[0] },
+ )
+
+ // embed expressions in a composite literal so we can test key:value and naked composite literals
+ testPos(t, exprs, "package p; var _ = T{ ", " }",
+ func(f *File) Node { return f.DeclList[0].(*VarDecl).Values.(*CompositeLit).ElemList[0] },
+ )
+
+ // embed types in a function signature so we can test ... types
+ testPos(t, types, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0].Type },
+ )
+
+ testPos(t, fields, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0] },
+ )
+
+ testPos(t, stmts, "package p; func _() { ", "; }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0] },
+ )
+
+ testPos(t, ranges, "package p; func _() { for ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*ForStmt).Init.(*RangeClause) },
+ )
+
+ testPos(t, guards, "package p; func _() { switch ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Tag.(*TypeSwitchGuard) },
+ )
+
+ testPos(t, cases, "package p; func _() { switch { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Body[0] },
+ )
+
+ testPos(t, comms, "package p; func _() { select { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SelectStmt).Body[0] },
+ )
+}
+
+func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*File) Node) {
+ for _, test := range list {
+ // complete source, compute @ position, and strip @ from source
+ src, index := stripAt(prefix + test.snippet + suffix)
+ if index < 0 {
+ t.Errorf("missing @: %s (%s)", src, test.nodetyp)
+ continue
+ }
+
+ // build syntax tree
+ file, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp)
+ continue
+ }
+
+ // extract desired node
+ node := extract(file)
+ if typ := typeOf(node); typ != test.nodetyp {
+ t.Errorf("type error: %s: type = %s, want %s", src, typ, test.nodetyp)
+ continue
+ }
+
+ // verify node position with expected position as indicated by @
+ if pos := int(node.Pos().Col()); pos != index+colbase {
+ t.Errorf("pos error: %s: pos = %d, want %d (%s)", src, pos, index+colbase, test.nodetyp)
+ continue
+ }
+ }
+}
+
+func stripAt(s string) (string, int) {
+ if i := strings.Index(s, "@"); i >= 0 {
+ return s[:i] + s[i+1:], i
+ }
+ return s, -1
+}
+
+func typeOf(n Node) string {
+ const prefix = "*syntax."
+ k := fmt.Sprintf("%T", n)
+ if strings.HasPrefix(k, prefix) {
+ return k[len(prefix):]
+ }
+ return k
+}
diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go
new file mode 100644
index 0000000..3c759b2
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/operator_string.go
@@ -0,0 +1,17 @@
+// Code generated by "stringer -type Operator -linecomment"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+const _Operator_name = ":!<-||&&==!=<<=>>=+-|^*/%&&^<<>>"
+
+var _Operator_index = [...]uint8{0, 1, 2, 4, 6, 8, 10, 12, 13, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 30, 32}
+
+func (i Operator) String() string {
+ i -= 1
+ if i >= Operator(len(_Operator_index)-1) {
+ return "Operator(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Operator_name[_Operator_index[i]:_Operator_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
new file mode 100644
index 0000000..1485b70
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -0,0 +1,2322 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const debug = false
+const trace = false
+
+type parser struct {
+ file *PosBase
+ errh ErrorHandler
+ mode Mode
+ pragh PragmaHandler
+ scanner
+
+ base *PosBase // current position base
+ first error // first error encountered
+ errcnt int // number of errors encountered
+ pragma Pragma // pragmas
+
+ fnest int // function nesting level (for error handling)
+ xnest int // expression nesting level (for complit ambiguity resolution)
+ indent []byte // tracing support
+}
+
+func (p *parser) init(file *PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) {
+ p.file = file
+ p.errh = errh
+ p.mode = mode
+ p.pragh = pragh
+ p.scanner.init(
+ r,
+ // Error and directive handler for scanner.
+ // Because the (line, col) positions passed to the
+ // handler are always at or after the current reading
+ // position, it is safe to use the most recent position
+ // base to compute the corresponding Pos value.
+ func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ p.errorAt(p.posAt(line, col), msg)
+ return
+ }
+
+ // otherwise it must be a comment containing a line or go: directive.
+ // //line directives must be at the start of the line (column colbase).
+ // /*line*/ directives can be anywhere in the line.
+ text := commentText(msg)
+ if (col == colbase || msg[1] == '*') && strings.HasPrefix(text, "line ") {
+ var pos Pos // position immediately following the comment
+ if msg[1] == '/' {
+ // line comment (newline is part of the comment)
+ pos = MakePos(p.file, line+1, colbase)
+ } else {
+ // regular comment
+ // (if the comment spans multiple lines it's not
+ // a valid line directive and will be discarded
+ // by updateBase)
+ pos = MakePos(p.file, line, col+uint(len(msg)))
+ }
+ p.updateBase(pos, line, col+2+5, text[5:]) // +2 to skip over // or /*
+ return
+ }
+
+ // go: directive (but be conservative and test)
+ if pragh != nil && strings.HasPrefix(text, "go:") {
+ p.pragma = pragh(p.posAt(line, col+2), p.scanner.blank, text, p.pragma) // +2 to skip over // or /*
+ }
+ },
+ directives,
+ )
+
+ p.base = file
+ p.first = nil
+ p.errcnt = 0
+ p.pragma = nil
+
+ p.fnest = 0
+ p.xnest = 0
+ p.indent = nil
+}
+
+// takePragma returns the current parsed pragmas
+// and clears them from the parser state.
+func (p *parser) takePragma() Pragma {
+ prag := p.pragma
+ p.pragma = nil
+ return prag
+}
+
+// clearPragma is called at the end of a statement or
+// other Go form that does NOT accept a pragma.
+// It sends the pragma back to the pragma handler
+// to be reported as unused.
+func (p *parser) clearPragma() {
+ if p.pragma != nil {
+ p.pragh(p.pos(), p.scanner.blank, "", p.pragma)
+ p.pragma = nil
+ }
+}
+
+// updateBase sets the current position base to a new line base at pos.
+// The base's filename, line, and column values are extracted from text
+// which is positioned at (tline, tcol) (only needed for error messages).
+func (p *parser) updateBase(pos Pos, tline, tcol uint, text string) {
+ i, n, ok := trailingDigits(text)
+ if i == 0 {
+ return // ignore (not a line directive)
+ }
+ // i > 0
+
+ if !ok {
+ // text has a suffix :xxx but xxx is not a number
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ var line, col uint
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+ if col == 0 || col > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i2), "invalid column number: "+text[i2:])
+ return
+ }
+ text = text[:i2-1] // lop off ":col"
+ } else {
+ //line filename:line
+ line = n
+ }
+
+ if line == 0 || line > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ // If we have a column (//line filename:line:col form),
+ // an empty filename means to use the previous filename.
+ filename := text[:i-1] // lop off ":line"
+ if filename == "" && ok2 {
+ filename = p.base.Filename()
+ }
+
+ p.base = NewLineBase(pos, filename, line, col)
+}
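+
+// For reference, the directive forms updateBase accepts look like
+//
+//    //line filename:line
+//    //line filename:line:col
+//    /*line filename:line:col*/
+//
+// (a sketch of the shapes handled above, not an exhaustive specification).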
+
+func commentText(s string) string {
+ if s[:2] == "/*" {
+ return s[2 : len(s)-2] // lop off /* and */
+ }
+
+ // line comment (does not include newline)
+ // (on Windows, the line comment may end in \r\n)
+ i := len(s)
+ if s[i-1] == '\r' {
+ i--
+ }
+ return s[2:i] // lop off //, and \r at end, if any
+}
+
+func trailingDigits(text string) (uint, uint, bool) {
+ // Want to use LastIndexByte below, but it's not defined in Go 1.4 and the bootstrap build fails.
+ i := strings.LastIndex(text, ":") // look from right (Windows filenames may contain ':')
+ if i < 0 {
+ return 0, 0, false // no ":"
+ }
+ // i >= 0
+ n, err := strconv.ParseUint(text[i+1:], 10, 0)
+ return uint(i + 1), uint(n), err == nil
+}
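+
+// For example (illustrative only): trailingDigits("file.go:20") returns
+// (8, 20, true), where 8 is the index just past the ":", while
+// trailingDigits("file.go") returns (0, 0, false) since there is no ":".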
+
+func (p *parser) got(tok token) bool {
+ if p.tok == tok {
+ p.next()
+ return true
+ }
+ return false
+}
+
+func (p *parser) want(tok token) {
+ if !p.got(tok) {
+ p.syntaxError("expecting " + tokstring(tok))
+ p.advance()
+ }
+}
+
+// gotAssign is like got(_Assign) but it also accepts ":="
+// (and reports an error) for better parser error recovery.
+func (p *parser) gotAssign() bool {
+ switch p.tok {
+ case _Define:
+ p.syntaxError("expecting =")
+ fallthrough
+ case _Assign:
+ p.next()
+ return true
+ }
+ return false
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// posAt returns the Pos value for (line, col) and the current position base.
+func (p *parser) posAt(line, col uint) Pos {
+ return MakePos(p.base, line, col)
+}
+
+// errorAt reports an error at the given position.
+func (p *parser) errorAt(pos Pos, msg string) {
+ err := Error{pos, msg}
+ if p.first == nil {
+ p.first = err
+ }
+ p.errcnt++
+ if p.errh == nil {
+ panic(p.first)
+ }
+ p.errh(err)
+}
+
+// syntaxErrorAt reports a syntax error at the given position.
+func (p *parser) syntaxErrorAt(pos Pos, msg string) {
+ if trace {
+ p.print("syntax error: " + msg)
+ }
+
+ if p.tok == _EOF && p.first != nil {
+ return // avoid meaningless follow-up errors
+ }
+
+ // add punctuation etc. as needed to msg
+ switch {
+ case msg == "":
+ // nothing to do
+ case strings.HasPrefix(msg, "in "), strings.HasPrefix(msg, "at "), strings.HasPrefix(msg, "after "):
+ msg = " " + msg
+ case strings.HasPrefix(msg, "expecting "):
+ msg = ", " + msg
+ default:
+ // plain error - we don't care about current token
+ p.errorAt(pos, "syntax error: "+msg)
+ return
+ }
+
+ // determine token string
+ var tok string
+ switch p.tok {
+ case _Name, _Semi:
+ tok = p.lit
+ case _Literal:
+ tok = "literal " + p.lit
+ case _Operator:
+ tok = p.op.String()
+ case _AssignOp:
+ tok = p.op.String() + "="
+ case _IncOp:
+ tok = p.op.String()
+ tok += tok
+ default:
+ tok = tokstring(p.tok)
+ }
+
+ p.errorAt(pos, "syntax error: unexpected "+tok+msg)
+}
+
+// tokstring returns the English word for selected punctuation tokens
+// for more readable error messages.
+func tokstring(tok token) string {
+ switch tok {
+ case _Comma:
+ return "comma"
+ case _Semi:
+ return "semicolon or newline"
+ }
+ return tok.String()
+}
+
+// Convenience methods using the current token position.
+func (p *parser) pos() Pos { return p.posAt(p.line, p.col) }
+func (p *parser) error(msg string) { p.errorAt(p.pos(), msg) }
+func (p *parser) syntaxError(msg string) { p.syntaxErrorAt(p.pos(), msg) }
+
+// The stopset contains keywords that start a statement.
+// They are good synchronization points in case of syntax
+// errors and (usually) shouldn't be skipped over.
+const stopset uint64 = 1<<_Break |
+ 1<<_Const |
+ 1<<_Continue |
+ 1<<_Defer |
+ 1<<_Fallthrough |
+ 1<<_For |
+ 1<<_Go |
+ 1<<_Goto |
+ 1<<_If |
+ 1<<_Return |
+ 1<<_Select |
+ 1<<_Switch |
+ 1<<_Type |
+ 1<<_Var
+
+// advance consumes tokens until it finds a token in the stopset or followlist.
+// The stopset is only considered if we are inside a function (p.fnest > 0).
+// The followlist is the list of valid tokens that can follow a production;
+// if it is empty, exactly one (non-EOF) token is consumed to ensure progress.
+func (p *parser) advance(followlist ...token) {
+ if trace {
+ p.print(fmt.Sprintf("advance %s", followlist))
+ }
+
+ // compute follow set
+ // (not speed critical, advance is only called in error situations)
+ var followset uint64 = 1 << _EOF // don't skip over EOF
+ if len(followlist) > 0 {
+ if p.fnest > 0 {
+ followset |= stopset
+ }
+ for _, tok := range followlist {
+ followset |= 1 << tok
+ }
+ }
+
+ for !contains(followset, p.tok) {
+ if trace {
+ p.print("skip " + p.tok.String())
+ }
+ p.next()
+ if len(followlist) == 0 {
+ break
+ }
+ }
+
+ if trace {
+ p.print("next " + p.tok.String())
+ }
+}
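+
+// The stopset and followset above are plain uint64 bit sets indexed by
+// token value, so membership is a single mask test. A minimal sketch of
+// the idea (the contains helper used above lives elsewhere in this package):
+//
+//    func contains(set uint64, tok token) bool { return set&(1<<tok) != 0 }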
+
+// usage: defer p.trace(msg)()
+func (p *parser) trace(msg string) func() {
+ p.print(msg + " (")
+ const tab = ". "
+ p.indent = append(p.indent, tab...)
+ return func() {
+ p.indent = p.indent[:len(p.indent)-len(tab)]
+ if x := recover(); x != nil {
+ panic(x) // skip print_trace
+ }
+ p.print(")")
+ }
+}
+
+func (p *parser) print(msg string) {
+ fmt.Printf("%5d: %s%s\n", p.line, p.indent, msg)
+}
+
+// ----------------------------------------------------------------------------
+// Package files
+//
+// Parse methods are annotated with matching Go productions as appropriate.
+// The annotations are intended as guidelines only since a single Go grammar
+// rule may be covered by multiple parse methods and vice versa.
+//
+// Excluding methods returning slices, parse methods named xOrNil may return
+// nil; all others are expected to return a valid non-nil node.
+
+// SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } .
+func (p *parser) fileOrNil() *File {
+ if trace {
+ defer p.trace("file")()
+ }
+
+ f := new(File)
+ f.pos = p.pos()
+
+ // PackageClause
+ if !p.got(_Package) {
+ p.syntaxError("package statement must be first")
+ return nil
+ }
+ f.Pragma = p.takePragma()
+ f.PkgName = p.name()
+ p.want(_Semi)
+
+ // don't bother continuing if package clause has errors
+ if p.first != nil {
+ return nil
+ }
+
+ // { ImportDecl ";" }
+ for p.got(_Import) {
+ f.DeclList = p.appendGroup(f.DeclList, p.importDecl)
+ p.want(_Semi)
+ }
+
+ // { TopLevelDecl ";" }
+ for p.tok != _EOF {
+ switch p.tok {
+ case _Const:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.constDecl)
+
+ case _Type:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.typeDecl)
+
+ case _Var:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.varDecl)
+
+ case _Func:
+ p.next()
+ if d := p.funcDeclOrNil(); d != nil {
+ f.DeclList = append(f.DeclList, d)
+ }
+
+ default:
+ if p.tok == _Lbrace && len(f.DeclList) > 0 && isEmptyFuncDecl(f.DeclList[len(f.DeclList)-1]) {
+ // opening { of function declaration on next line
+ p.syntaxError("unexpected semicolon or newline before {")
+ } else {
+ p.syntaxError("non-declaration statement outside function body")
+ }
+ p.advance(_Const, _Type, _Var, _Func)
+ continue
+ }
+
+ // Reset p.pragma BEFORE advancing to the next token (consuming ';')
+ // since comments before may set pragmas for the next function decl.
+ p.clearPragma()
+
+ if p.tok != _EOF && !p.got(_Semi) {
+ p.syntaxError("after top level declaration")
+ p.advance(_Const, _Type, _Var, _Func)
+ }
+ }
+ // p.tok == _EOF
+
+ p.clearPragma()
+ f.Lines = p.line
+
+ return f
+}
+
+func isEmptyFuncDecl(dcl Decl) bool {
+ f, ok := dcl.(*FuncDecl)
+ return ok && f.Body == nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// list parses a possibly empty, sep-separated list, optionally
+// followed by sep and enclosed by ( and ) or { and }. open is
+// either _Lparen or _Lbrace, sep is one of _Comma or _Semi,
+// and close is expected to be the (closing) opposite of open.
+// For each list element, f is called. After f returns true, no
+// more list elements are accepted. list returns the position
+// of the closing token.
+//
+// list = "(" { f sep } ")" |
+// "{" { f sep } "}" . // sep is optional before ")" or "}"
+//
+func (p *parser) list(open, sep, close token, f func() bool) Pos {
+ p.want(open)
+
+ var done bool
+ for p.tok != _EOF && p.tok != close && !done {
+ done = f()
+ // sep is optional before close
+ if !p.got(sep) && p.tok != close {
+ p.syntaxError(fmt.Sprintf("expecting %s or %s", tokstring(sep), tokstring(close)))
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ if p.tok != close {
+ // position could be better but we had an error so we don't care
+ return p.pos()
+ }
+ }
+ }
+
+ pos := p.pos()
+ p.want(close)
+ return pos
+}
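+
+// A usage sketch (the names variable here is illustrative only): parsing a
+// parenthesized, comma-separated name list could be written as
+//
+//    p.list(_Lparen, _Comma, _Rparen, func() bool {
+//        names = append(names, p.name())
+//        return false // keep accepting elements
+//    })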
+
+// appendGroup(f) = f | "(" { f ";" } ")" . // ";" is optional before ")"
+func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl {
+ if p.tok == _Lparen {
+ g := new(Group)
+ p.clearPragma()
+ p.list(_Lparen, _Semi, _Rparen, func() bool {
+ list = append(list, f(g))
+ return false
+ })
+ } else {
+ list = append(list, f(nil))
+ }
+
+ if debug {
+ for _, d := range list {
+ if d == nil {
+ panic("nil list entry")
+ }
+ }
+ }
+
+ return list
+}
+
+// ImportSpec = [ "." | PackageName ] ImportPath .
+// ImportPath = string_lit .
+func (p *parser) importDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("importDecl")()
+ }
+
+ d := new(ImportDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ switch p.tok {
+ case _Name:
+ d.LocalPkgName = p.name()
+ case _Dot:
+ d.LocalPkgName = p.newName(".")
+ p.next()
+ }
+ d.Path = p.oliteral()
+ if d.Path == nil {
+ p.syntaxError("missing import path")
+ p.advance(_Semi, _Rparen)
+ return nil
+ }
+
+ return d
+}
+
+// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
+func (p *parser) constDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("constDecl")()
+ }
+
+ d := new(ConstDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.tok != _EOF && p.tok != _Semi && p.tok != _Rparen {
+ d.Type = p.typeOrNil()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// TypeSpec = identifier [ "=" ] Type .
+func (p *parser) typeDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("typeDecl")()
+ }
+
+ d := new(TypeDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.Name = p.name()
+ d.Alias = p.gotAssign()
+ d.Type = p.typeOrNil()
+ if d.Type == nil {
+ d.Type = p.badExpr()
+ p.syntaxError("in type declaration")
+ p.advance(_Semi, _Rparen)
+ }
+
+ return d
+}
+
+// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
+func (p *parser) varDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("varDecl")()
+ }
+
+ d := new(VarDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ } else {
+ d.Type = p.type_()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// FunctionDecl = "func" FunctionName ( Function | Signature ) .
+// FunctionName = identifier .
+// Function = Signature FunctionBody .
+// MethodDecl = "func" Receiver MethodName ( Function | Signature ) .
+// Receiver = Parameters .
+func (p *parser) funcDeclOrNil() *FuncDecl {
+ if trace {
+ defer p.trace("funcDecl")()
+ }
+
+ f := new(FuncDecl)
+ f.pos = p.pos()
+ f.Pragma = p.takePragma()
+
+ if p.tok == _Lparen {
+ rcvr := p.paramList()
+ switch len(rcvr) {
+ case 0:
+ p.error("method has no receiver")
+ default:
+ p.error("method has multiple receivers")
+ fallthrough
+ case 1:
+ f.Recv = rcvr[0]
+ }
+ }
+
+ if p.tok != _Name {
+ p.syntaxError("expecting name or (")
+ p.advance(_Lbrace, _Semi)
+ return nil
+ }
+
+ f.Name = p.name()
+ f.Type = p.funcType()
+ if p.tok == _Lbrace {
+ f.Body = p.funcBody()
+ }
+
+ return f
+}
+
+func (p *parser) funcBody() *BlockStmt {
+ p.fnest++
+ errcnt := p.errcnt
+ body := p.blockStmt("")
+ p.fnest--
+
+ // Don't check branches if there were syntax errors in the function
+ // as it may lead to spurious errors (e.g., see test/switch2.go) or
+ // possibly crashes due to incomplete syntax trees.
+ if p.mode&CheckBranches != 0 && errcnt == p.errcnt {
+ checkBranches(body, p.errh)
+ }
+
+ return body
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) expr() Expr {
+ if trace {
+ defer p.trace("expr")()
+ }
+
+ return p.binaryExpr(0)
+}
+
+// Expression = UnaryExpr | Expression binary_op Expression .
+func (p *parser) binaryExpr(prec int) Expr {
+ // don't trace binaryExpr - only leads to overly nested trace output
+
+ x := p.unaryExpr()
+ for (p.tok == _Operator || p.tok == _Star) && p.prec > prec {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = p.op
+ t.X = x
+ tprec := p.prec
+ p.next()
+ t.Y = p.binaryExpr(tprec)
+ x = t
+ }
+ return x
+}
+
+// UnaryExpr = PrimaryExpr | unary_op UnaryExpr .
+func (p *parser) unaryExpr() Expr {
+ if trace {
+ defer p.trace("unaryExpr")()
+ }
+
+ switch p.tok {
+ case _Operator, _Star:
+ switch p.op {
+ case Mul, Add, Sub, Not, Xor:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = p.op
+ p.next()
+ x.X = p.unaryExpr()
+ return x
+
+ case And:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = And
+ p.next()
+ // unaryExpr may have returned a parenthesized composite literal
+ // (see comment in operand) - remove parentheses if any
+ x.X = unparen(p.unaryExpr())
+ return x
+ }
+
+ case _Arrow:
+ // receive op (<-x) or receive-only channel (<-chan E)
+ pos := p.pos()
+ p.next()
+
+ // If the next token is _Chan we still don't know if it is
+ // a channel (<-chan int) or a receive op (<-chan int(ch)).
+ // We only know once we have found the end of the unaryExpr.
+
+ x := p.unaryExpr()
+
+ // There are two cases:
+ //
+ // <-chan... => <-x is a channel type
+ // <-x => <-x is a receive operation
+ //
+ // In the first case, <- must be re-associated with
+ // the channel type parsed already:
+ //
+ // <-(chan E) => (<-chan E)
+ // <-(chan<-E) => (<-chan (<-E))
+
+ if _, ok := x.(*ChanType); ok {
+ // x is a channel type => re-associate <-
+ dir := SendOnly
+ t := x
+ for dir == SendOnly {
+ c, ok := t.(*ChanType)
+ if !ok {
+ break
+ }
+ dir = c.Dir
+ if dir == RecvOnly {
+ // t is type <-chan E but <-<-chan E is not permitted
+ // (report same error as for "type _ <-<-chan E")
+ p.syntaxError("unexpected <-, expecting chan")
+ // already progressed, no need to advance
+ }
+ c.Dir = RecvOnly
+ t = c.Elem
+ }
+ if dir == SendOnly {
+ // channel dir is <- but channel element E is not a channel
+ // (report same error as for "type _ <-chan<-E")
+ p.syntaxError(fmt.Sprintf("unexpected %s, expecting chan", String(t)))
+ // already progressed, no need to advance
+ }
+ return x
+ }
+
+ // x is not a channel type => we have a receive op
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Recv
+ o.X = x
+ return o
+ }
+
+ // TODO(mdempsky): We need parens here so we can report an
+ // error for "(x) := true". It should be possible to detect
+ // and reject that more efficiently though.
+ return p.pexpr(true)
+}
+
+// callStmt parses call-like statements that can be preceded by 'defer' and 'go'.
+func (p *parser) callStmt() *CallStmt {
+ if trace {
+ defer p.trace("callStmt")()
+ }
+
+ s := new(CallStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok // _Defer or _Go
+ p.next()
+
+ x := p.pexpr(p.tok == _Lparen) // keep_parens so we can report error below
+ if t := unparen(x); t != x {
+ p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
+ // already progressed, no need to advance
+ x = t
+ }
+
+ cx, ok := x.(*CallExpr)
+ if !ok {
+ p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must be function call", s.Tok))
+ // already progressed, no need to advance
+ cx = new(CallExpr)
+ cx.pos = x.Pos()
+ cx.Fun = x // assume common error of missing parentheses (function invocation)
+ }
+
+ s.Call = cx
+ return s
+}
+
+// Operand = Literal | OperandName | MethodExpr | "(" Expression ")" .
+// Literal = BasicLit | CompositeLit | FunctionLit .
+// BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
+// OperandName = identifier | QualifiedIdent.
+func (p *parser) operand(keep_parens bool) Expr {
+ if trace {
+ defer p.trace("operand " + p.tok.String())()
+ }
+
+ switch p.tok {
+ case _Name:
+ return p.name()
+
+ case _Literal:
+ return p.oliteral()
+
+ case _Lparen:
+ pos := p.pos()
+ p.next()
+ p.xnest++
+ x := p.expr()
+ p.xnest--
+ p.want(_Rparen)
+
+ // Optimization: Record presence of ()'s only where needed
+ // for error reporting. Don't bother in other cases; it is
+ // just a waste of memory and time.
+
+ // Parentheses are not permitted on lhs of := .
+ // switch x.Op {
+ // case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+ // keep_parens = true
+ // }
+
+ // Parentheses are not permitted around T in a composite
+ // literal T{}. If the next token is a {, assume x is a
+ // composite literal type T (it may not be, { could be
+ // the opening brace of a block, but we don't know yet).
+ if p.tok == _Lbrace {
+ keep_parens = true
+ }
+
+ // Parentheses are also not permitted around the expression
+ // in a go/defer statement. In that case, operand is called
+ // with keep_parens set.
+ if keep_parens {
+ px := new(ParenExpr)
+ px.pos = pos
+ px.X = x
+ x = px
+ }
+ return x
+
+ case _Func:
+ pos := p.pos()
+ p.next()
+ t := p.funcType()
+ if p.tok == _Lbrace {
+ p.xnest++
+
+ f := new(FuncLit)
+ f.pos = pos
+ f.Type = t
+ f.Body = p.funcBody()
+
+ p.xnest--
+ return f
+ }
+ return t
+
+ case _Lbrack, _Chan, _Map, _Struct, _Interface:
+ return p.type_() // othertype
+
+ default:
+ x := p.badExpr()
+ p.syntaxError("expecting expression")
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ return x
+ }
+
+ // Syntactically, composite literals are operands. Because a complit
+ // type may be a qualified identifier which is handled by pexpr
+ // (together with selector expressions), complits are parsed there
+ // as well (operand is only called from pexpr).
+}
+
+// PrimaryExpr =
+// Operand |
+// Conversion |
+// PrimaryExpr Selector |
+// PrimaryExpr Index |
+// PrimaryExpr Slice |
+// PrimaryExpr TypeAssertion |
+// PrimaryExpr Arguments .
+//
+// Selector = "." identifier .
+// Index = "[" Expression "]" .
+// Slice = "[" ( [ Expression ] ":" [ Expression ] ) |
+// ( [ Expression ] ":" Expression ":" Expression )
+// "]" .
+// TypeAssertion = "." "(" Type ")" .
+// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
+func (p *parser) pexpr(keep_parens bool) Expr {
+ if trace {
+ defer p.trace("pexpr")()
+ }
+
+ x := p.operand(keep_parens)
+
+loop:
+ for {
+ pos := p.pos()
+ switch p.tok {
+ case _Dot:
+ p.next()
+ switch p.tok {
+ case _Name:
+ // pexpr '.' sym
+ t := new(SelectorExpr)
+ t.pos = pos
+ t.X = x
+ t.Sel = p.name()
+ x = t
+
+ case _Lparen:
+ p.next()
+ if p.got(_Type) {
+ t := new(TypeSwitchGuard)
+ // t.Lhs is filled in by parser.simpleStmt
+ t.pos = pos
+ t.X = x
+ x = t
+ } else {
+ t := new(AssertExpr)
+ t.pos = pos
+ t.X = x
+ t.Type = p.type_()
+ x = t
+ }
+ p.want(_Rparen)
+
+ default:
+ p.syntaxError("expecting name or (")
+ p.advance(_Semi, _Rparen)
+ }
+
+ case _Lbrack:
+ p.next()
+ p.xnest++
+
+ var i Expr
+ if p.tok != _Colon {
+ i = p.expr()
+ if p.got(_Rbrack) {
+ // x[i]
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = x
+ t.Index = i
+ x = t
+ p.xnest--
+ break
+ }
+ }
+
+ // x[i:...
+ t := new(SliceExpr)
+ t.pos = pos
+ t.X = x
+ t.Index[0] = i
+ p.want(_Colon)
+ if p.tok != _Colon && p.tok != _Rbrack {
+ // x[i:j...
+ t.Index[1] = p.expr()
+ }
+ if p.tok == _Colon {
+ t.Full = true
+ // x[i:j:...]
+ if t.Index[1] == nil {
+ p.error("middle index required in 3-index slice")
+ t.Index[1] = p.badExpr()
+ }
+ p.next()
+ if p.tok != _Rbrack {
+ // x[i:j:k...
+ t.Index[2] = p.expr()
+ } else {
+ p.error("final index required in 3-index slice")
+ t.Index[2] = p.badExpr()
+ }
+ }
+ p.want(_Rbrack)
+
+ x = t
+ p.xnest--
+
+ case _Lparen:
+ t := new(CallExpr)
+ t.pos = pos
+ t.Fun = x
+ t.ArgList, t.HasDots = p.argList()
+ x = t
+
+ case _Lbrace:
+ // operand may have returned a parenthesized complit
+ // type; accept it but complain if we have a complit
+ t := unparen(x)
+ // determine if '{' belongs to a composite literal or a block statement
+ complit_ok := false
+ switch t.(type) {
+ case *Name, *SelectorExpr:
+ if p.xnest >= 0 {
+ // x is considered a composite literal type
+ complit_ok = true
+ }
+ case *ArrayType, *SliceType, *StructType, *MapType:
+ // x is a comptype
+ complit_ok = true
+ }
+ if !complit_ok {
+ break loop
+ }
+ if t != x {
+ p.syntaxError("cannot parenthesize type in composite literal")
+ // already progressed, no need to advance
+ }
+ n := p.complitexpr()
+ n.Type = x
+ x = n
+
+ default:
+ break loop
+ }
+ }
+
+ return x
+}
+
+// Element = Expression | LiteralValue .
+func (p *parser) bare_complitexpr() Expr {
+ if trace {
+ defer p.trace("bare_complitexpr")()
+ }
+
+ if p.tok == _Lbrace {
+ // '{' start_complit braced_keyval_list '}'
+ return p.complitexpr()
+ }
+
+ return p.expr()
+}
+
+// LiteralValue = "{" [ ElementList [ "," ] ] "}" .
+func (p *parser) complitexpr() *CompositeLit {
+ if trace {
+ defer p.trace("complitexpr")()
+ }
+
+ x := new(CompositeLit)
+ x.pos = p.pos()
+
+ p.xnest++
+ x.Rbrace = p.list(_Lbrace, _Comma, _Rbrace, func() bool {
+ // value
+ e := p.bare_complitexpr()
+ if p.tok == _Colon {
+ // key ':' value
+ l := new(KeyValueExpr)
+ l.pos = p.pos()
+ p.next()
+ l.Key = e
+ l.Value = p.bare_complitexpr()
+ e = l
+ x.NKeys++
+ }
+ x.ElemList = append(x.ElemList, e)
+ return false
+ })
+ p.xnest--
+
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) type_() Expr {
+ if trace {
+ defer p.trace("type_")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("expecting type")
+ p.advance(_Comma, _Colon, _Semi, _Rparen, _Rbrack, _Rbrace)
+ }
+
+ return typ
+}
+
+func newIndirect(pos Pos, typ Expr) Expr {
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Mul
+ o.X = typ
+ return o
+}
+
+// typeOrNil is like type_ but it returns nil if there was no type
+// instead of reporting an error.
+//
+// Type = TypeName | TypeLit | "(" Type ")" .
+// TypeName = identifier | QualifiedIdent .
+// TypeLit = ArrayType | StructType | PointerType | FunctionType | InterfaceType |
+// SliceType | MapType | Channel_Type .
+func (p *parser) typeOrNil() Expr {
+ if trace {
+ defer p.trace("typeOrNil")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Star:
+ // ptrtype
+ p.next()
+ return newIndirect(pos, p.type_())
+
+ case _Arrow:
+ // recvchantype
+ p.next()
+ p.want(_Chan)
+ t := new(ChanType)
+ t.pos = pos
+ t.Dir = RecvOnly
+ t.Elem = p.chanElem()
+ return t
+
+ case _Func:
+ // fntype
+ p.next()
+ return p.funcType()
+
+ case _Lbrack:
+ // '[' oexpr ']' ntype
+ // '[' _DotDotDot ']' ntype
+ p.next()
+ p.xnest++
+ if p.got(_Rbrack) {
+ // []T
+ p.xnest--
+ t := new(SliceType)
+ t.pos = pos
+ t.Elem = p.type_()
+ return t
+ }
+
+ // [n]T
+ t := new(ArrayType)
+ t.pos = pos
+ if !p.got(_DotDotDot) {
+ t.Len = p.expr()
+ }
+ p.want(_Rbrack)
+ p.xnest--
+ t.Elem = p.type_()
+ return t
+
+ case _Chan:
+ // _Chan non_recvchantype
+ // _Chan _Comm ntype
+ p.next()
+ t := new(ChanType)
+ t.pos = pos
+ if p.got(_Arrow) {
+ t.Dir = SendOnly
+ }
+ t.Elem = p.chanElem()
+ return t
+
+ case _Map:
+ // _Map '[' ntype ']' ntype
+ p.next()
+ p.want(_Lbrack)
+ t := new(MapType)
+ t.pos = pos
+ t.Key = p.type_()
+ p.want(_Rbrack)
+ t.Value = p.type_()
+ return t
+
+ case _Struct:
+ return p.structType()
+
+ case _Interface:
+ return p.interfaceType()
+
+ case _Name:
+ return p.dotname(p.name())
+
+ case _Lparen:
+ p.next()
+ t := p.type_()
+ p.want(_Rparen)
+ return t
+ }
+
+ return nil
+}
+
+func (p *parser) funcType() *FuncType {
+ if trace {
+ defer p.trace("funcType")()
+ }
+
+ typ := new(FuncType)
+ typ.pos = p.pos()
+ typ.ParamList = p.paramList()
+ typ.ResultList = p.funcResult()
+
+ return typ
+}
+
+func (p *parser) chanElem() Expr {
+ if trace {
+ defer p.trace("chanElem")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("missing channel element type")
+ // assume element type is simply absent - don't advance
+ }
+
+ return typ
+}
+
+func (p *parser) dotname(name *Name) Expr {
+ if trace {
+ defer p.trace("dotname")()
+ }
+
+ if p.tok == _Dot {
+ s := new(SelectorExpr)
+ s.pos = p.pos()
+ p.next()
+ s.X = name
+ s.Sel = p.name()
+ return s
+ }
+ return name
+}
+
+// StructType = "struct" "{" { FieldDecl ";" } "}" .
+func (p *parser) structType() *StructType {
+ if trace {
+ defer p.trace("structType")()
+ }
+
+ typ := new(StructType)
+ typ.pos = p.pos()
+
+ p.want(_Struct)
+ p.list(_Lbrace, _Semi, _Rbrace, func() bool {
+ p.fieldDecl(typ)
+ return false
+ })
+
+ return typ
+}
+
+// InterfaceType = "interface" "{" { MethodSpec ";" } "}" .
+func (p *parser) interfaceType() *InterfaceType {
+ if trace {
+ defer p.trace("interfaceType")()
+ }
+
+ typ := new(InterfaceType)
+ typ.pos = p.pos()
+
+ p.want(_Interface)
+ p.list(_Lbrace, _Semi, _Rbrace, func() bool {
+ if m := p.methodDecl(); m != nil {
+ typ.MethodList = append(typ.MethodList, m)
+ }
+ return false
+ })
+
+ return typ
+}
+
+// Result = Parameters | Type .
+func (p *parser) funcResult() []*Field {
+ if trace {
+ defer p.trace("funcResult")()
+ }
+
+ if p.tok == _Lparen {
+ return p.paramList()
+ }
+
+ pos := p.pos()
+ if typ := p.typeOrNil(); typ != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = typ
+ return []*Field{f}
+ }
+
+ return nil
+}
+
+func (p *parser) addField(styp *StructType, pos Pos, name *Name, typ Expr, tag *BasicLit) {
+ if tag != nil {
+ for i := len(styp.FieldList) - len(styp.TagList); i > 0; i-- {
+ styp.TagList = append(styp.TagList, nil)
+ }
+ styp.TagList = append(styp.TagList, tag)
+ }
+
+ f := new(Field)
+ f.pos = pos
+ f.Name = name
+ f.Type = typ
+ styp.FieldList = append(styp.FieldList, f)
+
+ if debug && tag != nil && len(styp.FieldList) != len(styp.TagList) {
+ panic("inconsistent struct field list")
+ }
+}
+
+// FieldDecl = (IdentifierList Type | AnonymousField) [ Tag ] .
+// AnonymousField = [ "*" ] TypeName .
+// Tag = string_lit .
+func (p *parser) fieldDecl(styp *StructType) {
+ if trace {
+ defer p.trace("fieldDecl")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Name:
+ name := p.name()
+ if p.tok == _Dot || p.tok == _Literal || p.tok == _Semi || p.tok == _Rbrace {
+ // embed oliteral
+ typ := p.qualifiedName(name)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ return
+ }
+
+ // new_name_list ntype oliteral
+ names := p.nameList(name)
+ typ := p.type_()
+ tag := p.oliteral()
+
+ for _, name := range names {
+ p.addField(styp, name.Pos(), name, typ, tag)
+ }
+
+ case _Lparen:
+ p.next()
+ if p.tok == _Star {
+ // '(' '*' embed ')' oliteral
+ pos := p.pos()
+ p.next()
+ typ := newIndirect(pos, p.qualifiedName(nil))
+ p.want(_Rparen)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ p.syntaxError("cannot parenthesize embedded type")
+
+ } else {
+ // '(' embed ')' oliteral
+ typ := p.qualifiedName(nil)
+ p.want(_Rparen)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ p.syntaxError("cannot parenthesize embedded type")
+ }
+
+ case _Star:
+ p.next()
+ if p.got(_Lparen) {
+ // '*' '(' embed ')' oliteral
+ typ := newIndirect(pos, p.qualifiedName(nil))
+ p.want(_Rparen)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ p.syntaxError("cannot parenthesize embedded type")
+
+ } else {
+ // '*' embed oliteral
+ typ := newIndirect(pos, p.qualifiedName(nil))
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ }
+
+ default:
+ p.syntaxError("expecting field name or embedded type")
+ p.advance(_Semi, _Rbrace)
+ }
+}
+
+func (p *parser) oliteral() *BasicLit {
+ if p.tok == _Literal {
+ b := new(BasicLit)
+ b.pos = p.pos()
+ b.Value = p.lit
+ b.Kind = p.kind
+ b.Bad = p.bad
+ p.next()
+ return b
+ }
+ return nil
+}
+
+// MethodSpec = MethodName Signature | InterfaceTypeName .
+// MethodName = identifier .
+// InterfaceTypeName = TypeName .
+func (p *parser) methodDecl() *Field {
+ if trace {
+ defer p.trace("methodDecl")()
+ }
+
+ switch p.tok {
+ case _Name:
+ name := p.name()
+
+ // accept potential name list but complain
+ hasNameList := false
+ for p.got(_Comma) {
+ p.name()
+ hasNameList = true
+ }
+ if hasNameList {
+ p.syntaxError("name list not allowed in interface type")
+ // already progressed, no need to advance
+ }
+
+ f := new(Field)
+ f.pos = name.Pos()
+ if p.tok != _Lparen {
+ // packname
+ f.Type = p.qualifiedName(name)
+ return f
+ }
+
+ f.Name = name
+ f.Type = p.funcType()
+ return f
+
+ case _Lparen:
+ p.syntaxError("cannot parenthesize embedded type")
+ f := new(Field)
+ f.pos = p.pos()
+ p.next()
+ f.Type = p.qualifiedName(nil)
+ p.want(_Rparen)
+ return f
+
+ default:
+ p.syntaxError("expecting method or interface name")
+ p.advance(_Semi, _Rbrace)
+ return nil
+ }
+}
+
+// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
+func (p *parser) paramDeclOrNil() *Field {
+ if trace {
+ defer p.trace("paramDecl")()
+ }
+
+ f := new(Field)
+ f.pos = p.pos()
+
+ switch p.tok {
+ case _Name:
+ f.Name = p.name()
+ switch p.tok {
+ case _Name, _Star, _Arrow, _Func, _Lbrack, _Chan, _Map, _Struct, _Interface, _Lparen:
+ // sym name_or_type
+ f.Type = p.type_()
+
+ case _DotDotDot:
+ // sym dotdotdot
+ f.Type = p.dotsType()
+
+ case _Dot:
+ // name_or_type
+ // from dotname
+ f.Type = p.dotname(f.Name)
+ f.Name = nil
+ }
+
+ case _Arrow, _Star, _Func, _Lbrack, _Chan, _Map, _Struct, _Interface, _Lparen:
+ // name_or_type
+ f.Type = p.type_()
+
+ case _DotDotDot:
+ // dotdotdot
+ f.Type = p.dotsType()
+
+ default:
+ p.syntaxError("expecting )")
+ p.advance(_Comma, _Rparen)
+ return nil
+ }
+
+ return f
+}
+
+// ...Type
+func (p *parser) dotsType() *DotsType {
+ if trace {
+ defer p.trace("dotsType")()
+ }
+
+ t := new(DotsType)
+ t.pos = p.pos()
+
+ p.want(_DotDotDot)
+ t.Elem = p.typeOrNil()
+ if t.Elem == nil {
+ t.Elem = p.badExpr()
+ p.syntaxError("final argument in variadic function missing type")
+ }
+
+ return t
+}
+
+// Parameters = "(" [ ParameterList [ "," ] ] ")" .
+// ParameterList = ParameterDecl { "," ParameterDecl } .
+func (p *parser) paramList() (list []*Field) {
+ if trace {
+ defer p.trace("paramList")()
+ }
+
+ pos := p.pos()
+
+ var named int // number of parameters that have an explicit name and type
+ p.list(_Lparen, _Comma, _Rparen, func() bool {
+ if par := p.paramDeclOrNil(); par != nil {
+ if debug && par.Name == nil && par.Type == nil {
+ panic("parameter without name or type")
+ }
+ if par.Name != nil && par.Type != nil {
+ named++
+ }
+ list = append(list, par)
+ }
+ return false
+ })
+
+ // distribute parameter types
+ if named == 0 {
+ // all unnamed => found names are named types
+ for _, par := range list {
+ if typ := par.Name; typ != nil {
+ par.Type = typ
+ par.Name = nil
+ }
+ }
+ } else if named != len(list) {
+ // some named => all must be named
+ ok := true
+ var typ Expr
+ for i := len(list) - 1; i >= 0; i-- {
+ if par := list[i]; par.Type != nil {
+ typ = par.Type
+ if par.Name == nil {
+ ok = false
+ n := p.newName("_")
+ n.pos = typ.Pos() // correct position
+ par.Name = n
+ }
+ } else if typ != nil {
+ par.Type = typ
+ } else {
+ // par.Type == nil && typ == nil => we only have a par.Name
+ ok = false
+ t := p.badExpr()
+ t.pos = par.Name.Pos() // correct position
+ par.Type = t
+ }
+ }
+ if !ok {
+ p.syntaxErrorAt(pos, "mixed named and unnamed function parameters")
+ }
+ }
+
+ return
+}
+
+func (p *parser) badExpr() *BadExpr {
+ b := new(BadExpr)
+ b.pos = p.pos()
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// We represent x++, x-- as assignments x += ImplicitOne, x -= ImplicitOne.
+// ImplicitOne should not be used elsewhere.
+var ImplicitOne = &BasicLit{Value: "1"}
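+
+// For example, x++ is parsed as &AssignStmt{Op: Add, Lhs: x, Rhs: ImplicitOne},
+// and x-- the same way with Op: Sub (a rough illustration of the convention).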
+
+// SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl .
+func (p *parser) simpleStmt(lhs Expr, keyword token) SimpleStmt {
+ if trace {
+ defer p.trace("simpleStmt")()
+ }
+
+ if keyword == _For && p.tok == _Range {
+ // _Range expr
+ if debug && lhs != nil {
+ panic("invalid call of simpleStmt")
+ }
+ return p.newRangeClause(nil, false)
+ }
+
+ if lhs == nil {
+ lhs = p.exprList()
+ }
+
+ if _, ok := lhs.(*ListExpr); !ok && p.tok != _Assign && p.tok != _Define {
+ // expr
+ pos := p.pos()
+ switch p.tok {
+ case _AssignOp:
+ // lhs op= rhs
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, p.expr())
+
+ case _IncOp:
+ // lhs++ or lhs--
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, ImplicitOne)
+
+ case _Arrow:
+ // lhs <- rhs
+ s := new(SendStmt)
+ s.pos = pos
+ p.next()
+ s.Chan = lhs
+ s.Value = p.expr()
+ return s
+
+ default:
+ // expr
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+ }
+
+ // expr_list
+ switch p.tok {
+ case _Assign, _Define:
+ pos := p.pos()
+ var op Operator
+ if p.tok == _Define {
+ op = Def
+ }
+ p.next()
+
+ if keyword == _For && p.tok == _Range {
+ // expr_list op= _Range expr
+ return p.newRangeClause(lhs, op == Def)
+ }
+
+ // expr_list op= expr_list
+ rhs := p.exprList()
+
+ if x, ok := rhs.(*TypeSwitchGuard); ok && keyword == _Switch && op == Def {
+ if lhs, ok := lhs.(*Name); ok {
+ // switch … lhs := rhs.(type)
+ x.Lhs = lhs
+ s := new(ExprStmt)
+ s.pos = x.Pos()
+ s.X = x
+ return s
+ }
+ }
+
+ return p.newAssignStmt(pos, op, lhs, rhs)
+
+ default:
+ p.syntaxError("expecting := or = or comma")
+ p.advance(_Semi, _Rbrace)
+ // make the best of what we have
+ if x, ok := lhs.(*ListExpr); ok {
+ lhs = x.ElemList[0]
+ }
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+}
+
+func (p *parser) newRangeClause(lhs Expr, def bool) *RangeClause {
+ r := new(RangeClause)
+ r.pos = p.pos()
+ p.next() // consume _Range
+ r.Lhs = lhs
+ r.Def = def
+ r.X = p.expr()
+ return r
+}
+
+func (p *parser) newAssignStmt(pos Pos, op Operator, lhs, rhs Expr) *AssignStmt {
+ a := new(AssignStmt)
+ a.pos = pos
+ a.Op = op
+ a.Lhs = lhs
+ a.Rhs = rhs
+ return a
+}
+
+func (p *parser) labeledStmtOrNil(label *Name) Stmt {
+ if trace {
+ defer p.trace("labeledStmt")()
+ }
+
+ s := new(LabeledStmt)
+ s.pos = p.pos()
+ s.Label = label
+
+ p.want(_Colon)
+
+ if p.tok == _Rbrace {
+ // We expect a statement (incl. an empty statement), which must be
+ // terminated by a semicolon. Because semicolons may be omitted before
+ // an _Rbrace, seeing an _Rbrace implies an empty statement.
+ e := new(EmptyStmt)
+ e.pos = p.pos()
+ s.Stmt = e
+ return s
+ }
+
+ s.Stmt = p.stmtOrNil()
+ if s.Stmt != nil {
+ return s
+ }
+
+ // report error at line of ':' token
+ p.syntaxErrorAt(s.pos, "missing statement after label")
+ // we are already at the end of the labeled statement - no need to advance
+ return nil // avoids follow-on errors (see e.g., fixedbugs/bug274.go)
+}
+
+// context must be a non-empty string unless we know that p.tok == _Lbrace.
+func (p *parser) blockStmt(context string) *BlockStmt {
+ if trace {
+ defer p.trace("blockStmt")()
+ }
+
+ s := new(BlockStmt)
+ s.pos = p.pos()
+
+ // people coming from C may forget that braces are mandatory in Go
+ if !p.got(_Lbrace) {
+ p.syntaxError("expecting { after " + context)
+ p.advance(_Name, _Rbrace)
+ s.Rbrace = p.pos() // in case we found "}"
+ if p.got(_Rbrace) {
+ return s
+ }
+ }
+
+ s.List = p.stmtList()
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) declStmt(f func(*Group) Decl) *DeclStmt {
+ if trace {
+ defer p.trace("declStmt")()
+ }
+
+ s := new(DeclStmt)
+ s.pos = p.pos()
+
+ p.next() // _Const, _Type, or _Var
+ s.DeclList = p.appendGroup(nil, f)
+
+ return s
+}
+
+func (p *parser) forStmt() Stmt {
+ if trace {
+ defer p.trace("forStmt")()
+ }
+
+ s := new(ForStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, s.Post = p.header(_For)
+ s.Body = p.blockStmt("for clause")
+
+ return s
+}
+
+func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleStmt) {
+ p.want(keyword)
+
+ if p.tok == _Lbrace {
+ if keyword == _If {
+ p.syntaxError("missing condition in if statement")
+ cond = p.badExpr()
+ }
+ return
+ }
+ // p.tok != _Lbrace
+
+ outer := p.xnest
+ p.xnest = -1
+
+ if p.tok != _Semi {
+ // accept potential varDecl but complain
+ if p.got(_Var) {
+ p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", keyword.String()))
+ }
+ init = p.simpleStmt(nil, keyword)
+ // If we have a range clause, we are done (can only happen for keyword == _For).
+ if _, ok := init.(*RangeClause); ok {
+ p.xnest = outer
+ return
+ }
+ }
+
+ var condStmt SimpleStmt
+ var semi struct {
+ pos Pos
+ lit string // valid if pos.IsKnown()
+ }
+ if p.tok != _Lbrace {
+ if p.tok == _Semi {
+ semi.pos = p.pos()
+ semi.lit = p.lit
+ p.next()
+ } else {
+ // asking for a '{' rather than a ';' here leads to a better error message
+ p.want(_Lbrace)
+ if p.tok != _Lbrace {
+ p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., issue #22581)
+ }
+ }
+ if keyword == _For {
+ if p.tok != _Semi {
+ if p.tok == _Lbrace {
+ p.syntaxError("expecting for loop condition")
+ goto done
+ }
+ condStmt = p.simpleStmt(nil, 0 /* range not permitted */)
+ }
+ p.want(_Semi)
+ if p.tok != _Lbrace {
+ post = p.simpleStmt(nil, 0 /* range not permitted */)
+ if a, _ := post.(*AssignStmt); a != nil && a.Op == Def {
+ p.syntaxErrorAt(a.Pos(), "cannot declare in post statement of for loop")
+ }
+ }
+ } else if p.tok != _Lbrace {
+ condStmt = p.simpleStmt(nil, keyword)
+ }
+ } else {
+ condStmt = init
+ init = nil
+ }
+
+done:
+ // unpack condStmt
+ switch s := condStmt.(type) {
+ case nil:
+ if keyword == _If && semi.pos.IsKnown() {
+ if semi.lit != "semicolon" {
+ p.syntaxErrorAt(semi.pos, fmt.Sprintf("unexpected %s, expecting { after if clause", semi.lit))
+ } else {
+ p.syntaxErrorAt(semi.pos, "missing condition in if statement")
+ }
+ b := new(BadExpr)
+ b.pos = semi.pos
+ cond = b
+ }
+ case *ExprStmt:
+ cond = s.X
+ default:
+ // A common syntax error is to write '=' instead of '==',
+ // which turns an expression into an assignment. Provide
+ // a more explicit error message in that case to prevent
+ // further confusion.
+ var str string
+ if as, ok := s.(*AssignStmt); ok && as.Op == 0 {
+ // Emphasize Lhs and Rhs of assignment with parentheses to highlight '='.
+ // Do it always - it's not worth going through the trouble of doing it
+ // only for "complex" left and right sides.
+ str = "assignment (" + String(as.Lhs) + ") = (" + String(as.Rhs) + ")"
+ } else {
+ str = String(s)
+ }
+ p.syntaxErrorAt(s.Pos(), fmt.Sprintf("cannot use %s as value", str))
+ }
+
+ p.xnest = outer
+ return
+}
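+
+// A worked illustration of the header above (hypothetical input): for
+// "if x := f(); x > 0 {", init is the AssignStmt x := f(), the recorded
+// semicolon separates it from the condition, and cond becomes the
+// expression x > 0 unpacked from its ExprStmt; post stays nil since
+// only for loops have a post statement.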
+
+func (p *parser) ifStmt() *IfStmt {
+ if trace {
+ defer p.trace("ifStmt")()
+ }
+
+ s := new(IfStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, _ = p.header(_If)
+ s.Then = p.blockStmt("if clause")
+
+ if p.got(_Else) {
+ switch p.tok {
+ case _If:
+ s.Else = p.ifStmt()
+ case _Lbrace:
+ s.Else = p.blockStmt("")
+ default:
+ p.syntaxError("else must be followed by if or statement block")
+ p.advance(_Name, _Rbrace)
+ }
+ }
+
+ return s
+}
+
+func (p *parser) switchStmt() *SwitchStmt {
+ if trace {
+ defer p.trace("switchStmt")()
+ }
+
+ s := new(SwitchStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Tag, _ = p.header(_Switch)
+
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after switch clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.caseClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) selectStmt() *SelectStmt {
+ if trace {
+ defer p.trace("selectStmt")()
+ }
+
+ s := new(SelectStmt)
+ s.pos = p.pos()
+
+ p.want(_Select)
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after select clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.commClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) caseClause() *CaseClause {
+ if trace {
+ defer p.trace("caseClause")()
+ }
+
+ c := new(CaseClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Cases = p.exprList()
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expecting case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+func (p *parser) commClause() *CommClause {
+ if trace {
+ defer p.trace("commClause")()
+ }
+
+ c := new(CommClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Comm = p.simpleStmt(nil, 0)
+
+ // The syntax restricts the possible simple statements here to:
+ //
+ // lhs <- x (send statement)
+ // <-x
+ // lhs = <-x
+ // lhs := <-x
+ //
+ // All these (and more) are recognized by simpleStmt and invalid
+ // syntax trees are flagged later, during type checking.
+ // TODO(gri) eventually may want to restrict valid syntax trees
+ // here.
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expecting case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+// Statement =
+// Declaration | LabeledStmt | SimpleStmt |
+// GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
+// FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
+// DeferStmt .
+func (p *parser) stmtOrNil() Stmt {
+ if trace {
+ defer p.trace("stmt " + p.tok.String())()
+ }
+
+ // Most statements (assignments) start with an identifier;
+ // look for it first before doing anything more expensive.
+ if p.tok == _Name {
+ p.clearPragma()
+ lhs := p.exprList()
+ if label, ok := lhs.(*Name); ok && p.tok == _Colon {
+ return p.labeledStmtOrNil(label)
+ }
+ return p.simpleStmt(lhs, 0)
+ }
+
+ switch p.tok {
+ case _Var:
+ return p.declStmt(p.varDecl)
+
+ case _Const:
+ return p.declStmt(p.constDecl)
+
+ case _Type:
+ return p.declStmt(p.typeDecl)
+ }
+
+ p.clearPragma()
+
+ switch p.tok {
+ case _Lbrace:
+ return p.blockStmt("")
+
+ case _Operator, _Star:
+ switch p.op {
+ case Add, Sub, Mul, And, Xor, Not:
+ return p.simpleStmt(nil, 0) // unary operators
+ }
+
+ case _Literal, _Func, _Lparen, // operands
+ _Lbrack, _Struct, _Map, _Chan, _Interface, // composite types
+ _Arrow: // receive operator
+ return p.simpleStmt(nil, 0)
+
+ case _For:
+ return p.forStmt()
+
+ case _Switch:
+ return p.switchStmt()
+
+ case _Select:
+ return p.selectStmt()
+
+ case _If:
+ return p.ifStmt()
+
+ case _Fallthrough:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ p.next()
+ s.Tok = _Fallthrough
+ return s
+
+ case _Break, _Continue:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok
+ p.next()
+ if p.tok == _Name {
+ s.Label = p.name()
+ }
+ return s
+
+ case _Go, _Defer:
+ return p.callStmt()
+
+ case _Goto:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = _Goto
+ p.next()
+ s.Label = p.name()
+ return s
+
+ case _Return:
+ s := new(ReturnStmt)
+ s.pos = p.pos()
+ p.next()
+ if p.tok != _Semi && p.tok != _Rbrace {
+ s.Results = p.exprList()
+ }
+ return s
+
+ case _Semi:
+ s := new(EmptyStmt)
+ s.pos = p.pos()
+ return s
+ }
+
+ return nil
+}
+
+// StatementList = { Statement ";" } .
+func (p *parser) stmtList() (l []Stmt) {
+ if trace {
+ defer p.trace("stmtList")()
+ }
+
+ for p.tok != _EOF && p.tok != _Rbrace && p.tok != _Case && p.tok != _Default {
+ s := p.stmtOrNil()
+ p.clearPragma()
+ if s == nil {
+ break
+ }
+ l = append(l, s)
+ // ";" is optional before "}"
+ if !p.got(_Semi) && p.tok != _Rbrace {
+ p.syntaxError("at end of statement")
+ p.advance(_Semi, _Rbrace, _Case, _Default)
+ p.got(_Semi) // avoid spurious empty statement
+ }
+ }
+ return
+}
+
+// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
+func (p *parser) argList() (list []Expr, hasDots bool) {
+ if trace {
+ defer p.trace("argList")()
+ }
+
+ p.xnest++
+ p.list(_Lparen, _Comma, _Rparen, func() bool {
+ list = append(list, p.expr())
+ hasDots = p.got(_DotDotDot)
+ return hasDots
+ })
+ p.xnest--
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+func (p *parser) newName(value string) *Name {
+ n := new(Name)
+ n.pos = p.pos()
+ n.Value = value
+ return n
+}
+
+func (p *parser) name() *Name {
+ // no tracing to avoid overly verbose output
+
+ if p.tok == _Name {
+ n := p.newName(p.lit)
+ p.next()
+ return n
+ }
+
+ n := p.newName("_")
+ p.syntaxError("expecting name")
+ p.advance()
+ return n
+}
+
+// IdentifierList = identifier { "," identifier } .
+// The first name must be provided.
+func (p *parser) nameList(first *Name) []*Name {
+ if trace {
+ defer p.trace("nameList")()
+ }
+
+ if debug && first == nil {
+ panic("first name not provided")
+ }
+
+ l := []*Name{first}
+ for p.got(_Comma) {
+ l = append(l, p.name())
+ }
+
+ return l
+}
+
+// The first name may be provided, or nil.
+func (p *parser) qualifiedName(name *Name) Expr {
+ if trace {
+ defer p.trace("qualifiedName")()
+ }
+
+ switch {
+ case name != nil:
+ // name is provided
+ case p.tok == _Name:
+ name = p.name()
+ default:
+ name = p.newName("_")
+ p.syntaxError("expecting name")
+ p.advance(_Dot, _Semi, _Rbrace)
+ }
+
+ return p.dotname(name)
+}
+
+// ExpressionList = Expression { "," Expression } .
+func (p *parser) exprList() Expr {
+ if trace {
+ defer p.trace("exprList")()
+ }
+
+ x := p.expr()
+ if p.got(_Comma) {
+ list := []Expr{x, p.expr()}
+ for p.got(_Comma) {
+ list = append(list, p.expr())
+ }
+ t := new(ListExpr)
+ t.pos = x.Pos()
+ t.ElemList = list
+ x = t
+ }
+ return x
+}
+
+// unparen removes all parentheses around an expression.
+func unparen(x Expr) Expr {
+ for {
+ p, ok := x.(*ParenExpr)
+ if !ok {
+ break
+ }
+ x = p.X
+ }
+ return x
+}
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
new file mode 100644
index 0000000..81945fa
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -0,0 +1,349 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ fast = flag.Bool("fast", false, "parse package files in parallel")
+ verify = flag.Bool("verify", false, "verify idempotent printing")
+ src_ = flag.String("src", "parser.go", "source file to parse")
+ skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib")
+)
+
+func TestParse(t *testing.T) {
+ ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0)
+}
+
+func TestStdLib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ var skipRx *regexp.Regexp
+ if *skip != "" {
+ var err error
+ skipRx, err = regexp.Compile(*skip)
+ if err != nil {
+ t.Fatalf("invalid argument for -skip (%v)", err)
+ }
+ }
+
+ var m1 runtime.MemStats
+ runtime.ReadMemStats(&m1)
+ start := time.Now()
+
+ type parseResult struct {
+ filename string
+ lines uint
+ }
+
+ results := make(chan parseResult)
+ go func() {
+ defer close(results)
+ for _, dir := range []string{
+ runtime.GOROOT(),
+ } {
+ walkDirs(t, dir, func(filename string) {
+ if skipRx != nil && skipRx.MatchString(filename) {
+ // Always report skipped files since regexp
+ // typos can lead to surprising results.
+ fmt.Printf("skipping %s\n", filename)
+ return
+ }
+ if debug {
+ fmt.Printf("parsing %s\n", filename)
+ }
+ ast, err := ParseFile(filename, nil, nil, 0)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if *verify {
+ verifyPrint(filename, ast)
+ }
+ results <- parseResult{filename, ast.Lines}
+ })
+ }
+ }()
+
+ var count, lines uint
+ for res := range results {
+ count++
+ lines += res.lines
+ if testing.Verbose() {
+ fmt.Printf("%5d %s (%d lines)\n", count, res.filename, res.lines)
+ }
+ }
+
+ dt := time.Since(start)
+ var m2 runtime.MemStats
+ runtime.ReadMemStats(&m2)
+ dm := float64(m2.TotalAlloc-m1.TotalAlloc) / 1e6
+
+ fmt.Printf("parsed %d lines (%d files) in %v (%d lines/s)\n", lines, count, dt, int64(float64(lines)/dt.Seconds()))
+ fmt.Printf("allocated %.3fMb (%.3fMb/s)\n", dm, dm/dt.Seconds())
+}
+
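+// walkDirs applies action to every .go file under dir, recursing into
+// subdirectories except those named "testdata" or "test". With -fast, the
+// files of each directory are processed concurrently.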
+func walkDirs(t *testing.T, dir string, action func(string)) {
+ fis, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var files, dirs []string
+ for _, fi := range fis {
+ if fi.Mode().IsRegular() {
+ if strings.HasSuffix(fi.Name(), ".go") {
+ path := filepath.Join(dir, fi.Name())
+ files = append(files, path)
+ }
+ } else if fi.IsDir() && fi.Name() != "testdata" {
+ path := filepath.Join(dir, fi.Name())
+ if !strings.HasSuffix(path, string(filepath.Separator)+"test") {
+ dirs = append(dirs, path)
+ }
+ }
+ }
+
+ if *fast {
+ var wg sync.WaitGroup
+ wg.Add(len(files))
+ for _, filename := range files {
+ go func(filename string) {
+ defer wg.Done()
+ action(filename)
+ }(filename)
+ }
+ wg.Wait()
+ } else {
+ for _, filename := range files {
+ action(filename)
+ }
+ }
+
+ for _, dir := range dirs {
+ walkDirs(t, dir, action)
+ }
+}
+
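+// verifyPrint checks that printing is idempotent: it prints ast1, re-parses
+// the output, prints the result again, and panics (after dumping both
+// renderings) if the two prints differ.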
+func verifyPrint(filename string, ast1 *File) {
+ var buf1 bytes.Buffer
+ _, err := Fprint(&buf1, ast1, true)
+ if err != nil {
+ panic(err)
+ }
+
+ ast2, err := Parse(NewFileBase(filename), &buf1, nil, nil, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ var buf2 bytes.Buffer
+ _, err = Fprint(&buf2, ast2, true)
+ if err != nil {
+ panic(err)
+ }
+
+ if bytes.Compare(buf1.Bytes(), buf2.Bytes()) != 0 {
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", buf1.Bytes())
+ fmt.Println()
+
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", buf2.Bytes())
+ fmt.Println()
+ panic("not equal")
+ }
+}
+
+func TestIssue17697(t *testing.T) {
+ _, err := Parse(nil, bytes.NewReader(nil), nil, nil, 0) // return with parser error, don't panic
+ if err == nil {
+ t.Errorf("no error reported")
+ }
+}
+
+func TestParseFile(t *testing.T) {
+ _, err := ParseFile("", nil, nil, 0)
+ if err == nil {
+ t.Error("missing io error")
+ }
+
+ var first error
+ _, err = ParseFile("", func(err error) {
+ if first == nil {
+ first = err
+ }
+ }, nil, 0)
+ if err == nil || first == nil {
+ t.Error("missing io error")
+ }
+ if err != first {
+ t.Errorf("got %v; want first error %v", err, first)
+ }
+}
+
+// Make sure (PosMax + 1) doesn't overflow when converted to default
+// type int (when passed as an argument to fmt.Sprintf) on 32-bit platforms
+// (see test cases below).
+var tooLarge int = PosMax + 1
+
+func TestLineDirectives(t *testing.T) {
+ // valid line directives lead to a syntax error after them
+ const valid = "syntax error: package statement must be first"
+ const filename = "directives.go"
+
+ for _, test := range []struct {
+ src, msg string
+ filename string
+ line, col uint // 1-based; 0 means unknown
+ }{
+ // ignored //line directives
+ {"//\n", valid, filename, 2, 1}, // no directive
+ {"//line\n", valid, filename, 2, 1}, // missing colon
+ {"//line foo\n", valid, filename, 2, 1}, // missing colon
+ {" //line foo:\n", valid, filename, 2, 1}, // not a line start
+ {"// line foo:\n", valid, filename, 2, 1}, // space between // and line
+
+ // invalid //line directives with one colon
+ {"//line :\n", "invalid line number: ", filename, 1, 9},
+ {"//line :x\n", "invalid line number: x", filename, 1, 9},
+ {"//line foo :\n", "invalid line number: ", filename, 1, 13},
+ {"//line foo:x\n", "invalid line number: x", filename, 1, 12},
+ {"//line foo:0\n", "invalid line number: 0", filename, 1, 12},
+ {"//line foo:1 \n", "invalid line number: 1 ", filename, 1, 12},
+ {"//line foo:-12\n", "invalid line number: -12", filename, 1, 12},
+ {"//line C:foo:0\n", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("//line foo:%d\n", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid //line directives with two colons
+ {"//line ::\n", "invalid line number: ", filename, 1, 10},
+ {"//line ::x\n", "invalid line number: x", filename, 1, 10},
+ {"//line foo::123abc\n", "invalid line number: 123abc", filename, 1, 13},
+ {"//line foo::0\n", "invalid line number: 0", filename, 1, 13},
+ {"//line foo:0:1\n", "invalid line number: 0", filename, 1, 12},
+
+ {"//line :123:0\n", "invalid column number: 0", filename, 1, 13},
+ {"//line foo:123:0\n", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("//line foo:10:%d\n", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid //line directives on lines
+ {"//line foo:123\n foo", valid, "foo", 123, 0},
+ {"//line foo:123\n foo", valid, " foo", 123, 0},
+ {"//line foo:123\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"//line C:foo:123\n", valid, "C:foo", 123, 0},
+ {"//line /src/a/a.go:123\n foo", valid, "/src/a/a.go", 123, 0},
+ {"//line :x:1\n", valid, ":x", 1, 0},
+ {"//line foo ::1\n", valid, "foo :", 1, 0},
+ {"//line foo:123abc:1\n", valid, "foo:123abc", 1, 0},
+ {"//line foo :123:1\n", valid, "foo ", 123, 1},
+ {"//line ::123\n", valid, ":", 123, 0},
+
+ // effect of valid //line directives on columns
+ {"//line :x:1:10\n", valid, ":x", 1, 10},
+ {"//line foo ::1:2\n", valid, "foo :", 1, 2},
+ {"//line foo:123abc:1:1000\n", valid, "foo:123abc", 1, 1000},
+ {"//line foo :123:1000\n\n", valid, "foo ", 124, 1},
+ {"//line ::123:1234\n", valid, ":", 123, 1234},
+
+ // //line directives with omitted filenames lead to empty filenames
+ {"//line :10\n", valid, "", 10, 0},
+ {"//line :10:20\n", valid, filename, 10, 20},
+ {"//line bar:1\n//line :10\n", valid, "", 10, 0},
+ {"//line bar:1\n//line :10:20\n", valid, "bar", 10, 20},
+
+ // ignored /*line directives
+ {"/**/", valid, filename, 1, 5}, // no directive
+ {"/*line*/", valid, filename, 1, 9}, // missing colon
+ {"/*line foo*/", valid, filename, 1, 13}, // missing colon
+ {" //line foo:*/", valid, filename, 1, 16}, // not a line start
+ {"/* line foo:*/", valid, filename, 1, 16}, // space between // and line
+
+ // invalid /*line directives with one colon
+ {"/*line :*/", "invalid line number: ", filename, 1, 9},
+ {"/*line :x*/", "invalid line number: x", filename, 1, 9},
+ {"/*line foo :*/", "invalid line number: ", filename, 1, 13},
+ {"/*line foo:x*/", "invalid line number: x", filename, 1, 12},
+ {"/*line foo:0*/", "invalid line number: 0", filename, 1, 12},
+ {"/*line foo:1 */", "invalid line number: 1 ", filename, 1, 12},
+ {"/*line C:foo:0*/", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("/*line foo:%d*/", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid /*line directives with two colons
+ {"/*line ::*/", "invalid line number: ", filename, 1, 10},
+ {"/*line ::x*/", "invalid line number: x", filename, 1, 10},
+ {"/*line foo::123abc*/", "invalid line number: 123abc", filename, 1, 13},
+ {"/*line foo::0*/", "invalid line number: 0", filename, 1, 13},
+ {"/*line foo:0:1*/", "invalid line number: 0", filename, 1, 12},
+
+ {"/*line :123:0*/", "invalid column number: 0", filename, 1, 13},
+ {"/*line foo:123:0*/", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("/*line foo:10:%d*/", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid /*line directives on lines
+ {"/*line foo:123*/ foo", valid, "foo", 123, 0},
+ {"/*line foo:123*/\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"/*line C:foo:123*/", valid, "C:foo", 123, 0},
+ {"/*line /src/a/a.go:123*/ foo", valid, "/src/a/a.go", 123, 0},
+ {"/*line :x:1*/", valid, ":x", 1, 0},
+ {"/*line foo ::1*/", valid, "foo :", 1, 0},
+ {"/*line foo:123abc:1*/", valid, "foo:123abc", 1, 0},
+ {"/*line foo :123:10*/", valid, "foo ", 123, 10},
+ {"/*line ::123*/", valid, ":", 123, 0},
+
+ // effect of valid /*line directives on columns
+ {"/*line :x:1:10*/", valid, ":x", 1, 10},
+ {"/*line foo ::1:2*/", valid, "foo :", 1, 2},
+ {"/*line foo:123abc:1:1000*/", valid, "foo:123abc", 1, 1000},
+ {"/*line foo :123:1000*/\n", valid, "foo ", 124, 1},
+ {"/*line ::123:1234*/", valid, ":", 123, 1234},
+
+ // /*line directives with omitted filenames lead to the previously used filenames
+ {"/*line :10*/", valid, "", 10, 0},
+ {"/*line :10:20*/", valid, filename, 10, 20},
+ {"//line bar:1\n/*line :10*/", valid, "", 10, 0},
+ {"//line bar:1\n/*line :10:20*/", valid, "bar", 10, 20},
+ } {
+ base := NewFileBase(filename)
+ _, err := Parse(base, strings.NewReader(test.src), nil, nil, 0)
+ if err == nil {
+ t.Errorf("%s: no error reported", test.src)
+ continue
+ }
+ perr, ok := err.(Error)
+ if !ok {
+ t.Errorf("%s: got %v; want parser error", test.src, err)
+ continue
+ }
+ if msg := perr.Msg; msg != test.msg {
+ t.Errorf("%s: got msg = %q; want %q", test.src, msg, test.msg)
+ }
+
+ pos := perr.Pos
+ if filename := pos.RelFilename(); filename != test.filename {
+ t.Errorf("%s: got filename = %q; want %q", test.src, filename, test.filename)
+ }
+ if line := pos.RelLine(); line != test.line {
+ t.Errorf("%s: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col := pos.RelCol(); col != test.col {
+ t.Errorf("%s: got col = %d; want %d", test.src, col, test.col)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/pos.go b/src/cmd/compile/internal/syntax/pos.go
new file mode 100644
index 0000000..c683c7f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/pos.go
@@ -0,0 +1,156 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// PosMax is the largest line or column value that can be represented without loss.
+// Incoming values (arguments) larger than PosMax will be set to PosMax.
+const PosMax = 1 << 30
+
+// A Pos represents an absolute (line, col) source position
+// with a reference to position base for computing relative
+// (to a file, or line directive) position information.
+// Pos values are intentionally light-weight so that they
+// can be created without too much concern about space use.
+type Pos struct {
+ base *PosBase
+ line, col uint32
+}
+
+// MakePos returns a new Pos for the given PosBase, line and column.
+func MakePos(base *PosBase, line, col uint) Pos { return Pos{base, sat32(line), sat32(col)} }
+
+// TODO(gri) IsKnown makes an assumption about linebase < 1.
+// Maybe we should check for Base() != nil instead.
+
+func (pos Pos) IsKnown() bool { return pos.line > 0 }
+func (pos Pos) Base() *PosBase { return pos.base }
+func (pos Pos) Line() uint { return uint(pos.line) }
+func (pos Pos) Col() uint { return uint(pos.col) }
+
+func (pos Pos) RelFilename() string { return pos.base.Filename() }
+
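+// RelLine returns the line number of pos relative to its position base:
+// the base's declared line plus the number of source lines between the base
+// position and pos. For example, if a "//line foo.go:10" directive yields a
+// base position at the start of source line 4, a token on source line 5
+// reports line 11.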
+func (pos Pos) RelLine() uint {
+ b := pos.base
+ if b.Line() == 0 {
+ // base line is unknown => relative line is unknown
+ return 0
+ }
+ return b.Line() + (pos.Line() - b.Pos().Line())
+}
+
+func (pos Pos) RelCol() uint {
+ b := pos.base
+ if b.Col() == 0 {
+ // base column is unknown => relative column is unknown
+ // (the current specification for line directives requires
+ // this to apply until the next PosBase/line directive,
+ // not just until the next newline)
+ return 0
+ }
+ if pos.Line() == b.Pos().Line() {
+ // pos on same line as pos base => column is relative to pos base
+ return b.Col() + (pos.Col() - b.Pos().Col())
+ }
+ return pos.Col()
+}
+
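+// String returns the relative position (filename:line:col); if the absolute
+// position differs, e.g. due to a //line directive, it is appended in
+// square brackets.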
+func (pos Pos) String() string {
+ rel := position_{pos.RelFilename(), pos.RelLine(), pos.RelCol()}
+ abs := position_{pos.Base().Pos().RelFilename(), pos.Line(), pos.Col()}
+ s := rel.String()
+ if rel != abs {
+ s += "[" + abs.String() + "]"
+ }
+ return s
+}
+
+// TODO(gri) cleanup: find better name, avoid conflict with position in error_test.go
+type position_ struct {
+ filename string
+ line, col uint
+}
+
+func (p position_) String() string {
+ if p.line == 0 {
+ if p.filename == "" {
+ return "<unknown position>"
+ }
+ return p.filename
+ }
+ if p.col == 0 {
+ return fmt.Sprintf("%s:%d", p.filename, p.line)
+ }
+ return fmt.Sprintf("%s:%d:%d", p.filename, p.line, p.col)
+}
+
+// A PosBase represents the base for relative position information:
+// At position pos, the relative position is filename:line:col.
+type PosBase struct {
+ pos Pos
+ filename string
+ line, col uint32
+}
+
+// NewFileBase returns a new PosBase for the given filename.
+// A file PosBase's position is relative to itself, with the
+// position being filename:1:1.
+func NewFileBase(filename string) *PosBase {
+ base := &PosBase{MakePos(nil, linebase, colbase), filename, linebase, colbase}
+ base.pos.base = base
+ return base
+}
+
+// NewLineBase returns a new PosBase for a line directive "line filename:line:col"
+// relative to pos, which is the position of the character immediately following
+// the comment containing the line directive. For a directive in a line comment,
+// that position is the beginning of the next line (i.e., the newline character
+// belongs to the line comment).
+func NewLineBase(pos Pos, filename string, line, col uint) *PosBase {
+ return &PosBase{pos, filename, sat32(line), sat32(col)}
+}
+
+func (base *PosBase) IsFileBase() bool {
+ if base == nil {
+ return false
+ }
+ return base.pos.base == base
+}
+
+func (base *PosBase) Pos() (_ Pos) {
+ if base == nil {
+ return
+ }
+ return base.pos
+}
+
+func (base *PosBase) Filename() string {
+ if base == nil {
+ return ""
+ }
+ return base.filename
+}
+
+func (base *PosBase) Line() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.line)
+}
+
+func (base *PosBase) Col() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.col)
+}
+
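+// sat32 converts x to uint32, saturating values larger than PosMax at PosMax.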
+func sat32(x uint) uint32 {
+ if x > PosMax {
+ return PosMax
+ }
+ return uint32(x)
+}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
new file mode 100644
index 0000000..8ff3bfa
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -0,0 +1,938 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax trees in source format.
+
+package syntax
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// TODO(gri) Consider removing the linebreaks flag from this signature.
+// It is likely rarely needed in common cases.
+
+func Fprint(w io.Writer, x Node, linebreaks bool) (n int, err error) {
+ p := printer{
+ output: w,
+ linebreaks: linebreaks,
+ }
+
+ defer func() {
+ n = p.written
+ if e := recover(); e != nil {
+ err = e.(localError).err // re-panics if it's not a localError
+ }
+ }()
+
+ p.print(x)
+ p.flush(_EOF)
+
+ return
+}
+
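+// String returns the source form of n on a single line (no line breaks).
+// The parser uses it to quote expressions in error messages such as
+// "cannot use ... as value".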
+func String(n Node) string {
+ var buf bytes.Buffer
+ _, err := Fprint(&buf, n, false)
+ if err != nil {
+ panic(err) // TODO(gri) print something sensible into buf instead
+ }
+ return buf.String()
+}
+
+type ctrlSymbol int
+
+const (
+ none ctrlSymbol = iota
+ semi
+ blank
+ newline
+ indent
+ outdent
+ // comment
+ // eolComment
+)
+
+type whitespace struct {
+ last token
+ kind ctrlSymbol
+ //text string // comment text (possibly ""); valid if kind == comment
+}
+
+type printer struct {
+ output io.Writer
+ written int // number of bytes written
+ linebreaks bool // print linebreaks instead of semis
+
+ indent int // current indentation level
+ nlcount int // number of consecutive newlines
+
+ pending []whitespace // pending whitespace
+ lastTok token // last token (after any pending semi) processed by print
+}
+
+// write is a thin wrapper around p.output.Write
+// that takes care of accounting and error handling.
+func (p *printer) write(data []byte) {
+ n, err := p.output.Write(data)
+ p.written += n
+ if err != nil {
+ panic(localError{err})
+ }
+}
+
+var (
+ tabBytes = []byte("\t\t\t\t\t\t\t\t")
+ newlineByte = []byte("\n")
+ blankByte = []byte(" ")
+)
+
+func (p *printer) writeBytes(data []byte) {
+ if len(data) == 0 {
+ panic("expected non-empty []byte")
+ }
+ if p.nlcount > 0 && p.indent > 0 {
+ // write indentation
+ n := p.indent
+ for n > len(tabBytes) {
+ p.write(tabBytes)
+ n -= len(tabBytes)
+ }
+ p.write(tabBytes[:n])
+ }
+ p.write(data)
+ p.nlcount = 0
+}
+
+func (p *printer) writeString(s string) {
+ p.writeBytes([]byte(s))
+}
+
+// If impliesSemi returns true for a non-blank line's final token tok,
+// a semicolon is automatically inserted. Vice versa, a semicolon may
+// be omitted in those cases.
+func impliesSemi(tok token) bool {
+ switch tok {
+ case _Name,
+ _Break, _Continue, _Fallthrough, _Return,
+ /*_Inc, _Dec,*/ _Rparen, _Rbrack, _Rbrace: // TODO(gri) fix this
+ return true
+ }
+ return false
+}
+
+// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
+
+func lineComment(text string) bool {
+ return strings.HasPrefix(text, "//")
+}
+
+func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
+ p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
+ switch kind {
+ case semi:
+ p.lastTok = _Semi
+ case newline:
+ p.lastTok = 0
+ // TODO(gri) do we need to handle /*-style comments containing newlines here?
+ }
+}
+
+func (p *printer) flush(next token) {
+ // eliminate semis and redundant whitespace
+ sawNewline := next == _EOF
+ sawParen := next == _Rparen || next == _Rbrace
+ for i := len(p.pending) - 1; i >= 0; i-- {
+ switch p.pending[i].kind {
+ case semi:
+ k := semi
+ if sawParen {
+ sawParen = false
+ k = none // eliminate semi
+ } else if sawNewline && impliesSemi(p.pending[i].last) {
+ sawNewline = false
+ k = none // eliminate semi
+ }
+ p.pending[i].kind = k
+ case newline:
+ sawNewline = true
+ case blank, indent, outdent:
+ // nothing to do
+ // case comment:
+ // // A multi-line comment acts like a newline; and a ""
+ // // comment implies by definition at least one newline.
+ // if text := p.pending[i].text; strings.HasPrefix(text, "/*") && strings.ContainsRune(text, '\n') {
+ // sawNewline = true
+ // }
+ // case eolComment:
+ // // TODO(gri) act depending on sawNewline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // print pending
+ prev := none
+ for i := range p.pending {
+ switch p.pending[i].kind {
+ case none:
+ // nothing to do
+ case semi:
+ p.writeString(";")
+ p.nlcount = 0
+ prev = semi
+ case blank:
+ if prev != blank {
+ // at most one blank
+ p.writeBytes(blankByte)
+ p.nlcount = 0
+ prev = blank
+ }
+ case newline:
+ const maxEmptyLines = 1
+ if p.nlcount <= maxEmptyLines {
+ p.write(newlineByte)
+ p.nlcount++
+ prev = newline
+ }
+ case indent:
+ p.indent++
+ case outdent:
+ p.indent--
+ if p.indent < 0 {
+ panic("negative indentation")
+ }
+ // case comment:
+ // if text := p.pending[i].text; text != "" {
+ // p.writeString(text)
+ // p.nlcount = 0
+ // prev = comment
+ // }
+ // // TODO(gri) should check that line comments are always followed by newline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ p.pending = p.pending[:0] // re-use underlying array
+}
+
+func mayCombine(prev token, next byte) (b bool) {
+ return // for now
+ // switch prev {
+ // case lexical.Int:
+ // b = next == '.' // 1.
+ // case lexical.Add:
+ // b = next == '+' // ++
+ // case lexical.Sub:
+ // b = next == '-' // --
+ // case lexical.Quo:
+ // b = next == '*' // /*
+ // case lexical.Lss:
+ // b = next == '-' || next == '<' // <- or <<
+ // case lexical.And:
+ // b = next == '&' || next == '^' // && or &^
+ // }
+ // return
+}
+
+func (p *printer) print(args ...interface{}) {
+ for i := 0; i < len(args); i++ {
+ switch x := args[i].(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ case Node:
+ p.printNode(x)
+
+ case token:
+ // _Name implies an immediately following string
+ // argument which is the actual value to print.
+ var s string
+ if x == _Name {
+ i++
+ if i >= len(args) {
+ panic("missing string argument after _Name")
+ }
+ s = args[i].(string)
+ } else {
+ s = x.String()
+ }
+
+ // TODO(gri) This check seems at the wrong place since it doesn't
+ // take into account pending white space.
+ if mayCombine(p.lastTok, s[0]) {
+ panic("adjacent tokens combine without whitespace")
+ }
+
+ if x == _Semi {
+ // delay printing of semi
+ p.addWhitespace(semi, "")
+ } else {
+ p.flush(x)
+ p.writeString(s)
+ p.nlcount = 0
+ p.lastTok = x
+ }
+
+ case Operator:
+ if x != 0 {
+ p.flush(_Operator)
+ p.writeString(x.String())
+ }
+
+ case ctrlSymbol:
+ switch x {
+ case none, semi /*, comment*/ :
+ panic("unreachable")
+ case newline:
+ // TODO(gri) need to handle mandatory newlines after a //-style comment
+ if !p.linebreaks {
+ x = blank
+ }
+ }
+ p.addWhitespace(x, "")
+
+ // case *Comment: // comments are not Nodes
+ // p.addWhitespace(comment, x.Text)
+
+ default:
+ panic(fmt.Sprintf("unexpected argument %v (%T)", x, x))
+ }
+ }
+}
+
+func (p *printer) printNode(n Node) {
+ // ncom := *n.Comments()
+ // if ncom != nil {
+ // // TODO(gri) in general we cannot make assumptions about whether
+ // // a comment is a /*- or a //-style comment since the syntax
+ // // tree may have been manipulated. Need to make sure the correct
+ // // whitespace is emitted.
+ // for _, c := range ncom.Alone {
+ // p.print(c, newline)
+ // }
+ // for _, c := range ncom.Before {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style 'before' comment")
+ // }
+ // p.print(c, blank)
+ // }
+ // }
+
+ p.printRawNode(n)
+
+ // if ncom != nil && len(ncom.After) > 0 {
+ // for i, c := range ncom.After {
+ // if i+1 < len(ncom.After) {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style non-final 'after' comment")
+ // }
+ // }
+ // p.print(blank, c)
+ // }
+ // //p.print(newline)
+ // }
+}
+
+func (p *printer) printRawNode(n Node) {
+ switch n := n.(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ // expressions and types
+ case *BadExpr:
+ p.print(_Name, "<bad expr>")
+
+ case *Name:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *BasicLit:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *FuncLit:
+ p.print(n.Type, blank, n.Body)
+
+ case *CompositeLit:
+ if n.Type != nil {
+ p.print(n.Type)
+ }
+ p.print(_Lbrace)
+ if n.NKeys > 0 && n.NKeys == len(n.ElemList) {
+ p.printExprLines(n.ElemList)
+ } else {
+ p.printExprList(n.ElemList)
+ }
+ p.print(_Rbrace)
+
+ case *ParenExpr:
+ p.print(_Lparen, n.X, _Rparen)
+
+ case *SelectorExpr:
+ p.print(n.X, _Dot, n.Sel)
+
+ case *IndexExpr:
+ p.print(n.X, _Lbrack, n.Index, _Rbrack)
+
+ case *SliceExpr:
+ p.print(n.X, _Lbrack)
+ if i := n.Index[0]; i != nil {
+ p.printNode(i)
+ }
+ p.print(_Colon)
+ if j := n.Index[1]; j != nil {
+ p.printNode(j)
+ }
+ if k := n.Index[2]; k != nil {
+ p.print(_Colon, k)
+ }
+ p.print(_Rbrack)
+
+ case *AssertExpr:
+ p.print(n.X, _Dot, _Lparen, n.Type, _Rparen)
+
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ p.print(n.Lhs, blank, _Define, blank)
+ }
+ p.print(n.X, _Dot, _Lparen, _Type, _Rparen)
+
+ case *CallExpr:
+ p.print(n.Fun, _Lparen)
+ p.printExprList(n.ArgList)
+ if n.HasDots {
+ p.print(_DotDotDot)
+ }
+ p.print(_Rparen)
+
+ case *Operation:
+ if n.Y == nil {
+ // unary expr
+ p.print(n.Op)
+ // if n.Op == lexical.Range {
+ // p.print(blank)
+ // }
+ p.print(n.X)
+ } else {
+ // binary expr
+ // TODO(gri) eventually take precedence into account
+ // to control possibly missing parentheses
+ p.print(n.X, blank, n.Op, blank, n.Y)
+ }
+
+ case *KeyValueExpr:
+ p.print(n.Key, _Colon, blank, n.Value)
+
+ case *ListExpr:
+ p.printExprList(n.ElemList)
+
+ case *ArrayType:
+ var len interface{} = _DotDotDot
+ if n.Len != nil {
+ len = n.Len
+ }
+ p.print(_Lbrack, len, _Rbrack, n.Elem)
+
+ case *SliceType:
+ p.print(_Lbrack, _Rbrack, n.Elem)
+
+ case *DotsType:
+ p.print(_DotDotDot, n.Elem)
+
+ case *StructType:
+ p.print(_Struct)
+ if len(n.FieldList) > 0 && p.linebreaks {
+ p.print(blank)
+ }
+ p.print(_Lbrace)
+ if len(n.FieldList) > 0 {
+ p.print(newline, indent)
+ p.printFieldList(n.FieldList, n.TagList)
+ p.print(outdent, newline)
+ }
+ p.print(_Rbrace)
+
+ case *FuncType:
+ p.print(_Func)
+ p.printSignature(n)
+
+ case *InterfaceType:
+ p.print(_Interface)
+ if len(n.MethodList) > 0 && p.linebreaks {
+ p.print(blank)
+ }
+ p.print(_Lbrace)
+ if len(n.MethodList) > 0 {
+ p.print(newline, indent)
+ p.printMethodList(n.MethodList)
+ p.print(outdent, newline)
+ }
+ p.print(_Rbrace)
+
+ case *MapType:
+ p.print(_Map, _Lbrack, n.Key, _Rbrack, n.Value)
+
+ case *ChanType:
+ if n.Dir == RecvOnly {
+ p.print(_Arrow)
+ }
+ p.print(_Chan)
+ if n.Dir == SendOnly {
+ p.print(_Arrow)
+ }
+ p.print(blank, n.Elem)
+
+ // statements
+ case *DeclStmt:
+ p.printDecl(n.DeclList)
+
+ case *EmptyStmt:
+ // nothing to print
+
+ case *LabeledStmt:
+ p.print(outdent, n.Label, _Colon, indent, newline, n.Stmt)
+
+ case *ExprStmt:
+ p.print(n.X)
+
+ case *SendStmt:
+ p.print(n.Chan, blank, _Arrow, blank, n.Value)
+
+ case *AssignStmt:
+ p.print(n.Lhs)
+ if n.Rhs == ImplicitOne {
+ // TODO(gri) This is going to break the mayCombine
+ // check once we enable that again.
+ p.print(n.Op, n.Op) // ++ or --
+ } else {
+ p.print(blank, n.Op, _Assign, blank)
+ p.print(n.Rhs)
+ }
+
+ case *CallStmt:
+ p.print(n.Tok, blank, n.Call)
+
+ case *ReturnStmt:
+ p.print(_Return)
+ if n.Results != nil {
+ p.print(blank, n.Results)
+ }
+
+ case *BranchStmt:
+ p.print(n.Tok)
+ if n.Label != nil {
+ p.print(blank, n.Label)
+ }
+
+ case *BlockStmt:
+ p.print(_Lbrace)
+ if len(n.List) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(n.List, true)
+ p.print(outdent, newline)
+ }
+ p.print(_Rbrace)
+
+ case *IfStmt:
+ p.print(_If, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ p.print(n.Cond, blank, n.Then)
+ if n.Else != nil {
+ p.print(blank, _Else, blank, n.Else)
+ }
+
+ case *SwitchStmt:
+ p.print(_Switch, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ if n.Tag != nil {
+ p.print(n.Tag, blank)
+ }
+ p.printSwitchBody(n.Body)
+
+ case *SelectStmt:
+ p.print(_Select, blank) // for now
+ p.printSelectBody(n.Body)
+
+ case *RangeClause:
+ if n.Lhs != nil {
+ tok := _Assign
+ if n.Def {
+ tok = _Define
+ }
+ p.print(n.Lhs, blank, tok, blank)
+ }
+ p.print(_Range, blank, n.X)
+
+ case *ForStmt:
+ p.print(_For, blank)
+ if n.Init == nil && n.Post == nil {
+ if n.Cond != nil {
+ p.print(n.Cond, blank)
+ }
+ } else {
+ if n.Init != nil {
+ p.print(n.Init)
+ // TODO(gri) clean this up
+ if _, ok := n.Init.(*RangeClause); ok {
+ p.print(blank, n.Body)
+ break
+ }
+ }
+ p.print(_Semi, blank)
+ if n.Cond != nil {
+ p.print(n.Cond)
+ }
+ p.print(_Semi, blank)
+ if n.Post != nil {
+ p.print(n.Post, blank)
+ }
+ }
+ p.print(n.Body)
+
+ case *ImportDecl:
+ if n.Group == nil {
+ p.print(_Import, blank)
+ }
+ if n.LocalPkgName != nil {
+ p.print(n.LocalPkgName, blank)
+ }
+ p.print(n.Path)
+
+ case *ConstDecl:
+ if n.Group == nil {
+ p.print(_Const, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *TypeDecl:
+ if n.Group == nil {
+ p.print(_Type, blank)
+ }
+ p.print(n.Name, blank)
+ if n.Alias {
+ p.print(_Assign, blank)
+ }
+ p.print(n.Type)
+
+ case *VarDecl:
+ if n.Group == nil {
+ p.print(_Var, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *FuncDecl:
+ p.print(_Func, blank)
+ if r := n.Recv; r != nil {
+ p.print(_Lparen)
+ if r.Name != nil {
+ p.print(r.Name, blank)
+ }
+ p.printNode(r.Type)
+ p.print(_Rparen, blank)
+ }
+ p.print(n.Name)
+ p.printSignature(n.Type)
+ if n.Body != nil {
+ p.print(blank, n.Body)
+ }
+
+ case *printGroup:
+ p.print(n.Tok, blank, _Lparen)
+ if len(n.Decls) > 0 {
+ p.print(newline, indent)
+ for _, d := range n.Decls {
+ p.printNode(d)
+ p.print(_Semi, newline)
+ }
+ p.print(outdent)
+ }
+ p.print(_Rparen)
+
+ // files
+ case *File:
+ p.print(_Package, blank, n.PkgName)
+ if len(n.DeclList) > 0 {
+ p.print(_Semi, newline, newline)
+ p.printDeclList(n.DeclList)
+ }
+
+ default:
+ panic(fmt.Sprintf("syntax.Iterate: unexpected node type %T", n))
+ }
+}
+
+func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) {
+ if i+1 == j && fields[i].Name == nil {
+ // anonymous field
+ p.printNode(fields[i].Type)
+ } else {
+ for k, f := range fields[i:j] {
+ if k > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(f.Name)
+ }
+ p.print(blank)
+ p.printNode(fields[i].Type)
+ }
+ if i < len(tags) && tags[i] != nil {
+ p.print(blank)
+ p.printNode(tags[i])
+ }
+}
+
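+// printFieldList prints a struct's field list, grouping consecutive named
+// fields that share the same type node onto one line (as in "x, y int").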
+func (p *printer) printFieldList(fields []*Field, tags []*BasicLit) {
+ i0 := 0
+ var typ Expr
+ for i, f := range fields {
+ if f.Name == nil || f.Type != typ {
+ if i0 < i {
+ p.printFields(fields, tags, i0, i)
+ p.print(_Semi, newline)
+ i0 = i
+ }
+ typ = f.Type
+ }
+ }
+ p.printFields(fields, tags, i0, len(fields))
+}
+
+func (p *printer) printMethodList(methods []*Field) {
+ for i, m := range methods {
+ if i > 0 {
+ p.print(_Semi, newline)
+ }
+ if m.Name != nil {
+ p.printNode(m.Name)
+ p.printSignature(m.Type.(*FuncType))
+ } else {
+ p.printNode(m.Type)
+ }
+ }
+}
+
+func (p *printer) printNameList(list []*Name) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprList(list []Expr) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprLines(list []Expr) {
+ if len(list) > 0 {
+ p.print(newline, indent)
+ for _, x := range list {
+ p.print(x, _Comma, newline)
+ }
+ p.print(outdent)
+ }
+}
+
+func groupFor(d Decl) (token, *Group) {
+ switch d := d.(type) {
+ case *ImportDecl:
+ return _Import, d.Group
+ case *ConstDecl:
+ return _Const, d.Group
+ case *TypeDecl:
+ return _Type, d.Group
+ case *VarDecl:
+ return _Var, d.Group
+ case *FuncDecl:
+ return _Func, nil
+ default:
+ panic("unreachable")
+ }
+}
+
+type printGroup struct {
+ node
+ Tok token
+ Decls []Decl
+}
+
+func (p *printer) printDecl(list []Decl) {
+ tok, group := groupFor(list[0])
+
+ if group == nil {
+ if len(list) != 1 {
+ panic("unreachable")
+ }
+ p.printNode(list[0])
+ return
+ }
+
+ // if _, ok := list[0].(*EmptyDecl); ok {
+ // if len(list) != 1 {
+ // panic("unreachable")
+ // }
+ // // TODO(gri) if there are comments inside the empty
+ // // group, we may need to keep the list non-nil
+ // list = nil
+ // }
+
+ // printGroup is here for consistent comment handling
+ // (this is not yet used)
+ var pg printGroup
+ // *pg.Comments() = *group.Comments()
+ pg.Tok = tok
+ pg.Decls = list
+ p.printNode(&pg)
+}
+
+func (p *printer) printDeclList(list []Decl) {
+ i0 := 0
+ var tok token
+ var group *Group
+ for i, x := range list {
+ if s, g := groupFor(x); g == nil || g != group {
+ if i0 < i {
+ p.printDecl(list[i0:i])
+ p.print(_Semi, newline)
+ // print empty line between different declaration groups,
+ // different kinds of declarations, or between functions
+ if g != group || s != tok || s == _Func {
+ p.print(newline)
+ }
+ i0 = i
+ }
+ tok, group = s, g
+ }
+ }
+ p.printDecl(list[i0:])
+}
+
+func (p *printer) printSignature(sig *FuncType) {
+ p.printParameterList(sig.ParamList)
+ if list := sig.ResultList; list != nil {
+ p.print(blank)
+ if len(list) == 1 && list[0].Name == nil {
+ p.printNode(list[0].Type)
+ } else {
+ p.printParameterList(list)
+ }
+ }
+}
+
+func (p *printer) printParameterList(list []*Field) {
+ p.print(_Lparen)
+ if len(list) > 0 {
+ for i, f := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ if f.Name != nil {
+ p.printNode(f.Name)
+ if i+1 < len(list) {
+ f1 := list[i+1]
+ if f1.Name != nil && f1.Type == f.Type {
+ continue // no need to print type
+ }
+ }
+ p.print(blank)
+ }
+ p.printNode(f.Type)
+ }
+ }
+ p.print(_Rparen)
+}
+
+func (p *printer) printStmtList(list []Stmt, braces bool) {
+ for i, x := range list {
+ p.print(x, _Semi)
+ if i+1 < len(list) {
+ p.print(newline)
+ } else if braces {
+ // Print an extra semicolon if the last statement is
+ // an empty statement and we are in a braced block
+ // because one semicolon is automatically removed.
+ if _, ok := x.(*EmptyStmt); ok {
+ p.print(x, _Semi)
+ }
+ }
+ }
+}
+
+func (p *printer) printSwitchBody(list []*CaseClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCaseClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printSelectBody(list []*CommClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCommClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printCaseClause(c *CaseClause, braces bool) {
+ if c.Cases != nil {
+ p.print(_Case, blank, c.Cases)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
+
+func (p *printer) printCommClause(c *CommClause, braces bool) {
+ if c.Comm != nil {
+ p.print(_Case, blank)
+ p.print(c.Comm)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
new file mode 100644
index 0000000..c3b9aca
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestPrint(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ // provide a dummy error handler so parsing doesn't stop after first error
+ ast, err := ParseFile(*src_, func(error) {}, nil, 0)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if ast != nil {
+ Fprint(testOut(), ast, true)
+ fmt.Println()
+ }
+}
+
+func TestPrintString(t *testing.T) {
+ for _, want := range []string{
+ "package p",
+ "package p; type _ = int; type T1 = struct{}; type ( _ = *struct{}; T2 = float32 )",
+ // TODO(gri) expand
+ } {
+ ast, err := Parse(nil, strings.NewReader(want), nil, nil, 0)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if got := String(ast); got != want {
+ t.Errorf("%q: got %q", want, got)
+ }
+ }
+}
+
+func testOut() io.Writer {
+ if testing.Verbose() {
+ return os.Stdout
+ }
+ return ioutil.Discard
+}
diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go
new file mode 100644
index 0000000..9fe4965
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner.go
@@ -0,0 +1,876 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements scanner, a lexical tokenizer for
+// Go source. After initialization, consecutive calls of
+// next advance the scanner one token at a time.
+//
+// This file, source.go, tokens.go, and token_string.go are self-contained
+// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles)
+// and thus could be made into their own package.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "unicode"
+ "unicode/utf8"
+)
+
+// The mode flags below control which comments are reported
+// by calling the error handler. If no flag is set, comments
+// are ignored.
+const (
+ comments uint = 1 << iota // call handler for all comments
+ directives // call handler for directives only
+)
+
+type scanner struct {
+ source
+ mode uint
+ nlsemi bool // if set '\n' and EOF translate to ';'
+
+ // current token, valid after calling next()
+ line, col uint
+ blank bool // line is blank up to col
+ tok token
+ lit string // valid if tok is _Name, _Literal, or _Semi ("semicolon", "newline", or "EOF"); may be malformed if bad is true
+ bad bool // valid if tok is _Literal, true if a syntax error occurred, lit may be malformed
+ kind LitKind // valid if tok is _Literal
+ op Operator // valid if tok is _Operator, _AssignOp, or _IncOp
+ prec int // valid if tok is _Operator, _AssignOp, or _IncOp
+}
+
+func (s *scanner) init(src io.Reader, errh func(line, col uint, msg string), mode uint) {
+ s.source.init(src, errh)
+ s.mode = mode
+ s.nlsemi = false
+}
+
+// errorf reports an error at the most recently read character position.
+func (s *scanner) errorf(format string, args ...interface{}) {
+ s.error(fmt.Sprintf(format, args...))
+}
+
+// errorAtf reports an error at a byte column offset relative to the current token start.
+func (s *scanner) errorAtf(offset int, format string, args ...interface{}) {
+ s.errh(s.line, s.col+uint(offset), fmt.Sprintf(format, args...))
+}
+
+// setLit sets the scanner state for a recognized _Literal token.
+func (s *scanner) setLit(kind LitKind, ok bool) {
+ s.nlsemi = true
+ s.tok = _Literal
+ s.lit = string(s.segment())
+ s.bad = !ok
+ s.kind = kind
+}
+
+// next advances the scanner by reading the next token.
+//
+// If a read, source encoding, or lexical error occurs, next calls
+// the installed error handler with the respective error position
+// and message. The error message is guaranteed to be non-empty and
+// never starts with a '/'. The error handler must exist.
+//
+// If the scanner mode includes the comments flag and a comment
+// (including comments containing directives) is encountered, the
+// error handler is also called with each comment position and text
+// (including opening /* or // and closing */, but without a newline
+// at the end of line comments). Comment text always starts with a /
+// which can be used to distinguish these handler calls from errors.
+//
+// If the scanner mode includes the directives (but not the comments)
+// flag, only comments containing a //line, /*line, or //go: directive
+// are reported, in the same way as regular comments.
+func (s *scanner) next() {
+ nlsemi := s.nlsemi
+ s.nlsemi = false
+
+redo:
+ // skip white space
+ s.stop()
+ startLine, startCol := s.pos()
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' {
+ s.nextch()
+ }
+
+ // token start
+ s.line, s.col = s.pos()
+ s.blank = s.line > startLine || startCol == colbase
+ s.start()
+ if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) {
+ s.nextch()
+ s.ident()
+ return
+ }
+
+ switch s.ch {
+ case -1:
+ if nlsemi {
+ s.lit = "EOF"
+ s.tok = _Semi
+ break
+ }
+ s.tok = _EOF
+
+ case '\n':
+ s.nextch()
+ s.lit = "newline"
+ s.tok = _Semi
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ s.number(false)
+
+ case '"':
+ s.stdString()
+
+ case '`':
+ s.rawString()
+
+ case '\'':
+ s.rune()
+
+ case '(':
+ s.nextch()
+ s.tok = _Lparen
+
+ case '[':
+ s.nextch()
+ s.tok = _Lbrack
+
+ case '{':
+ s.nextch()
+ s.tok = _Lbrace
+
+ case ',':
+ s.nextch()
+ s.tok = _Comma
+
+ case ';':
+ s.nextch()
+ s.lit = "semicolon"
+ s.tok = _Semi
+
+ case ')':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rparen
+
+ case ']':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrack
+
+ case '}':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrace
+
+ case ':':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _Define
+ break
+ }
+ s.tok = _Colon
+
+ case '.':
+ s.nextch()
+ if isDecimal(s.ch) {
+ s.number(true)
+ break
+ }
+ if s.ch == '.' {
+ s.nextch()
+ if s.ch == '.' {
+ s.nextch()
+ s.tok = _DotDotDot
+ break
+ }
+ s.rewind() // now s.ch holds 1st '.'
+ s.nextch() // consume 1st '.' again
+ }
+ s.tok = _Dot
+
+ case '+':
+ s.nextch()
+ s.op, s.prec = Add, precAdd
+ if s.ch != '+' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '-':
+ s.nextch()
+ s.op, s.prec = Sub, precAdd
+ if s.ch != '-' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '*':
+ s.nextch()
+ s.op, s.prec = Mul, precMul
+ // don't goto assignop - want _Star token
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ break
+ }
+ s.tok = _Star
+
+ case '/':
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ s.lineComment()
+ goto redo
+ }
+ if s.ch == '*' {
+ s.nextch()
+ s.fullComment()
+ if line, _ := s.pos(); line > s.line && nlsemi {
+ // A multi-line comment acts like a newline;
+ // it translates to a ';' if nlsemi is set.
+ s.lit = "newline"
+ s.tok = _Semi
+ break
+ }
+ goto redo
+ }
+ s.op, s.prec = Div, precMul
+ goto assignop
+
+ case '%':
+ s.nextch()
+ s.op, s.prec = Rem, precMul
+ goto assignop
+
+ case '&':
+ s.nextch()
+ if s.ch == '&' {
+ s.nextch()
+ s.op, s.prec = AndAnd, precAndAnd
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = And, precMul
+ if s.ch == '^' {
+ s.nextch()
+ s.op = AndNot
+ }
+ goto assignop
+
+ case '|':
+ s.nextch()
+ if s.ch == '|' {
+ s.nextch()
+ s.op, s.prec = OrOr, precOrOr
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Or, precAdd
+ goto assignop
+
+ case '^':
+ s.nextch()
+ s.op, s.prec = Xor, precAdd
+ goto assignop
+
+ case '<':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Leq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '<' {
+ s.nextch()
+ s.op, s.prec = Shl, precMul
+ goto assignop
+ }
+ if s.ch == '-' {
+ s.nextch()
+ s.tok = _Arrow
+ break
+ }
+ s.op, s.prec = Lss, precCmp
+ s.tok = _Operator
+
+ case '>':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Geq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '>' {
+ s.nextch()
+ s.op, s.prec = Shr, precMul
+ goto assignop
+ }
+ s.op, s.prec = Gtr, precCmp
+ s.tok = _Operator
+
+ case '=':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Eql, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.tok = _Assign
+
+ case '!':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Neq, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Not, 0
+ s.tok = _Operator
+
+ default:
+ s.errorf("invalid character %#U", s.ch)
+ s.nextch()
+ goto redo
+ }
+
+ return
+
+assignop:
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ return
+ }
+ s.tok = _Operator
+}
+
+func (s *scanner) ident() {
+ // accelerate common case (7bit ASCII)
+ for isLetter(s.ch) || isDecimal(s.ch) {
+ s.nextch()
+ }
+
+ // general case
+ if s.ch >= utf8.RuneSelf {
+ for s.atIdentChar(false) {
+ s.nextch()
+ }
+ }
+
+ // possibly a keyword
+ lit := s.segment()
+ if len(lit) >= 2 {
+ if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) {
+ s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok)
+ s.tok = tok
+ return
+ }
+ }
+
+ s.nlsemi = true
+ s.lit = string(lit)
+ s.tok = _Name
+}
+
+// tokStrFast is a faster version of token.String, which assumes that tok
+// is one of the valid tokens - and can thus skip bounds checks.
+func tokStrFast(tok token) string {
+ return _token_name[_token_index[tok-1]:_token_index[tok]]
+}
+
+func (s *scanner) atIdentChar(first bool) bool {
+ switch {
+ case unicode.IsLetter(s.ch) || s.ch == '_':
+ // ok
+ case unicode.IsDigit(s.ch):
+ if first {
+ s.errorf("identifier cannot begin with digit %#U", s.ch)
+ }
+ case s.ch >= utf8.RuneSelf:
+ s.errorf("invalid character %#U in identifier", s.ch)
+ default:
+ return false
+ }
+ return true
+}
+
+// hash is a perfect hash function for keywords.
+// It assumes that s has at least length 2.
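+// The result is an index into keywordMap below; init panics at startup if
+// the hash is not collision-free for the keywords.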
+func hash(s []byte) uint {
+ return (uint(s[0])<<4 ^ uint(s[1]) + uint(len(s))) & uint(len(keywordMap)-1)
+}
+
+var keywordMap [1 << 6]token // size must be power of two
+
+func init() {
+ // populate keywordMap
+ for tok := _Break; tok <= _Var; tok++ {
+ h := hash([]byte(tok.String()))
+ if keywordMap[h] != 0 {
+ panic("imperfect hash")
+ }
+ keywordMap[h] = tok
+ }
+}
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' }
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
+
+// digits accepts the sequence { digit | '_' }.
+// If base <= 10, digits accepts any decimal digit but records
+// the index (relative to the literal start) of a digit >= base
+// in *invalid, if *invalid < 0.
+// digits returns a bitset describing whether the sequence contained
+// digits (bit 0 is set), or separators '_' (bit 1 is set).
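+// For example, scanning the digits of "1_000" yields digsep == 3 (a digit
+// and a '_' were both seen).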
+func (s *scanner) digits(base int, invalid *int) (digsep int) {
+ if base <= 10 {
+ max := rune('0' + base)
+ for isDecimal(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ } else if s.ch >= max && *invalid < 0 {
+ _, col := s.pos()
+ *invalid = int(col - s.col) // record invalid rune index
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ } else {
+ for isHex(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ }
+ return
+}
+
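+// number scans a number literal (integer, floating-point, or imaginary),
+// starting at the current character. If seenPoint is set, the leading '.'
+// has already been consumed and the literal is a float.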
+func (s *scanner) number(seenPoint bool) {
+ ok := true
+ kind := IntLit
+ base := 10 // number base
+ prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
+ digsep := 0 // bit 0: digit present, bit 1: '_' present
+ invalid := -1 // index of invalid digit in literal, or < 0
+
+ // integer part
+ if !seenPoint {
+ if s.ch == '0' {
+ s.nextch()
+ switch lower(s.ch) {
+ case 'x':
+ s.nextch()
+ base, prefix = 16, 'x'
+ case 'o':
+ s.nextch()
+ base, prefix = 8, 'o'
+ case 'b':
+ s.nextch()
+ base, prefix = 2, 'b'
+ default:
+ base, prefix = 8, '0'
+ digsep = 1 // leading 0
+ }
+ }
+ digsep |= s.digits(base, &invalid)
+ if s.ch == '.' {
+ if prefix == 'o' || prefix == 'b' {
+ s.errorf("invalid radix point in %s literal", baseName(base))
+ ok = false
+ }
+ s.nextch()
+ seenPoint = true
+ }
+ }
+
+ // fractional part
+ if seenPoint {
+ kind = FloatLit
+ digsep |= s.digits(base, &invalid)
+ }
+
+ if digsep&1 == 0 && ok {
+ s.errorf("%s literal has no digits", baseName(base))
+ ok = false
+ }
+
+ // exponent
+ if e := lower(s.ch); e == 'e' || e == 'p' {
+ if ok {
+ switch {
+ case e == 'e' && prefix != 0 && prefix != '0':
+ s.errorf("%q exponent requires decimal mantissa", s.ch)
+ ok = false
+ case e == 'p' && prefix != 'x':
+ s.errorf("%q exponent requires hexadecimal mantissa", s.ch)
+ ok = false
+ }
+ }
+ s.nextch()
+ kind = FloatLit
+ if s.ch == '+' || s.ch == '-' {
+ s.nextch()
+ }
+ digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit
+ if digsep&1 == 0 && ok {
+ s.errorf("exponent has no digits")
+ ok = false
+ }
+ } else if prefix == 'x' && kind == FloatLit && ok {
+ s.errorf("hexadecimal mantissa requires a 'p' exponent")
+ ok = false
+ }
+
+ // suffix 'i'
+ if s.ch == 'i' {
+ kind = ImagLit
+ s.nextch()
+ }
+
+ s.setLit(kind, ok) // do this now so we can use s.lit below
+
+ if kind == IntLit && invalid >= 0 && ok {
+ s.errorAtf(invalid, "invalid digit %q in %s literal", s.lit[invalid], baseName(base))
+ ok = false
+ }
+
+ if digsep&2 != 0 && ok {
+ if i := invalidSep(s.lit); i >= 0 {
+ s.errorAtf(i, "'_' must separate successive digits")
+ ok = false
+ }
+ }
+
+ s.bad = !ok // correct s.bad
+}
+
+func baseName(base int) string {
+ switch base {
+ case 2:
+ return "binary"
+ case 8:
+ return "octal"
+ case 10:
+ return "decimal"
+ case 16:
+ return "hexadecimal"
+ }
+ panic("invalid base")
+}
+
+// invalidSep returns the index of the first invalid separator in x, or -1.
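+// For example, invalidSep("1__0") returns 2: the second '_' does not
+// follow a digit.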
+func invalidSep(x string) int {
+ x1 := ' ' // prefix char, we only care if it's 'x'
+ d := '.' // digit, one of '_', '0' (a digit), or '.' (anything else)
+ i := 0
+
+ // a prefix counts as a digit
+ if len(x) >= 2 && x[0] == '0' {
+ x1 = lower(rune(x[1]))
+ if x1 == 'x' || x1 == 'o' || x1 == 'b' {
+ d = '0'
+ i = 2
+ }
+ }
+
+ // mantissa and exponent
+ for ; i < len(x); i++ {
+ p := d // previous digit
+ d = rune(x[i])
+ switch {
+ case d == '_':
+ if p != '0' {
+ return i
+ }
+ case isDecimal(d) || x1 == 'x' && isHex(d):
+ d = '0'
+ default:
+ if p == '_' {
+ return i - 1
+ }
+ d = '.'
+ }
+ }
+ if d == '_' {
+ return len(x) - 1
+ }
+
+ return -1
+}
+
+func (s *scanner) rune() {
+ ok := true
+ s.nextch()
+
+ n := 0
+ for ; ; n++ {
+ if s.ch == '\'' {
+ if ok {
+ if n == 0 {
+ s.errorf("empty rune literal or unescaped '")
+ ok = false
+ } else if n != 1 {
+ s.errorAtf(0, "more than one character in rune literal")
+ ok = false
+ }
+ }
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('\'') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ if ok {
+ s.errorf("newline in rune literal")
+ ok = false
+ }
+ break
+ }
+ if s.ch < 0 {
+ if ok {
+ s.errorAtf(0, "rune literal not terminated")
+ ok = false
+ }
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(RuneLit, ok)
+}
+
+func (s *scanner) stdString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '"' {
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('"') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ s.errorf("newline in string")
+ ok = false
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) rawString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '`' {
+ s.nextch()
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+ // We leave CRs in the string since they are part of the
+ // literal (even though they are not part of the literal
+ // value).
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) comment(text string) {
+ s.errorAtf(0, "%s", text)
+}
+
+func (s *scanner) skipLine() {
+ // don't consume '\n' - needed for nlsemi logic
+ for s.ch >= 0 && s.ch != '\n' {
+ s.nextch()
+ }
+}
+
+func (s *scanner) lineComment() {
+ // opening has already been consumed
+
+ if s.mode&comments != 0 {
+ s.skipLine()
+ s.comment(string(s.segment()))
+ return
+ }
+
+ // are we saving directives? or is this definitely not a directive?
+ if s.mode&directives == 0 || (s.ch != 'g' && s.ch != 'l') {
+ s.stop()
+ s.skipLine()
+ return
+ }
+
+ // recognize go: or line directives
+ prefix := "go:"
+ if s.ch == 'l' {
+ prefix = "line "
+ }
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipLine()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ s.skipLine()
+ s.comment(string(s.segment()))
+}
+
+func (s *scanner) skipComment() bool {
+ for s.ch >= 0 {
+ for s.ch == '*' {
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ return true
+ }
+ }
+ s.nextch()
+ }
+ s.errorAtf(0, "comment not terminated")
+ return false
+}
+
+func (s *scanner) fullComment() {
+ /* opening has already been consumed */
+
+ if s.mode&comments != 0 {
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+ return
+ }
+
+ if s.mode&directives == 0 || s.ch != 'l' {
+ s.stop()
+ s.skipComment()
+ return
+ }
+
+ // recognize line directive
+ const prefix = "line "
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipComment()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+}
+
+func (s *scanner) escape(quote rune) bool {
+ var n int
+ var base, max uint32
+
+ switch s.ch {
+ case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\':
+ s.nextch()
+ return true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.nextch()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.nextch()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.nextch()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ s.errorf("unknown escape")
+ return false
+ }
+
+ var x uint32
+ for i := n; i > 0; i-- {
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ d := base
+ if isDecimal(s.ch) {
+ d = uint32(s.ch) - '0'
+ } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' {
+ d = uint32(lower(s.ch)) - 'a' + 10
+ }
+ if d >= base {
+ s.errorf("invalid character %q in %s escape", s.ch, baseName(int(base)))
+ return false
+ }
+ // d < base
+ x = x*base + d
+ s.nextch()
+ }
+
+ if x > max && base == 8 {
+ s.errorf("octal escape value %d > 255", x)
+ return false
+ }
+
+ if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ {
+ s.errorf("escape is invalid Unicode code point %#U", x)
+ return false
+ }
+
+ return true
+}
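For orientation, a minimal sketch of how the separator rule implemented by invalidSep above plays out on concrete literals. This is illustrative only (invalidSep is unexported, so such a helper would have to live in package syntax) and is not part of the change:

    // hypothetical helper inside package syntax, shown only to illustrate invalidSep
    func exampleInvalidSep() {
        _ = invalidSep("0x_f00d")  // -1: every '_' separates successive digits (the 0x prefix counts as a digit)
        _ = invalidSep("0x__f00d") // 3: the second '_' does not follow a digit
        _ = invalidSep("0466_")    // 4: a trailing '_' is not followed by a digit
    }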
diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go
new file mode 100644
index 0000000..0433862
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner_test.go
@@ -0,0 +1,764 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+)
+
+// errh is a default error handler for basic tests.
+func errh(line, col uint, msg string) {
+ panic(fmt.Sprintf("%d:%d: %s", line, col, msg))
+}
+
+// Don't bother with other tests if TestSmoke doesn't pass.
+func TestSmoke(t *testing.T) {
+ const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$"
+ tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF}
+
+ var got scanner
+ got.init(strings.NewReader(src), errh, 0)
+ for _, want := range tokens {
+ got.next()
+ if got.tok != want {
+ t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want)
+ continue
+ }
+ }
+}
+
+// Once TestSmoke passes, run TestTokens next.
+func TestTokens(t *testing.T) {
+ var got scanner
+ for _, want := range sampleTokens {
+ got.init(strings.NewReader(want.src), func(line, col uint, msg string) {
+ t.Errorf("%s:%d:%d: %s", want.src, line, col, msg)
+ }, 0)
+ got.next()
+ if got.tok != want.tok {
+ t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok)
+ continue
+ }
+ if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src {
+ t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src)
+ }
+ }
+}
+
+func TestScanner(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ filename := *src_ // can be changed via -src flag
+ src, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ var s scanner
+ s.init(src, errh, 0)
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ if !testing.Verbose() {
+ continue
+ }
+ switch s.tok {
+ case _Name, _Literal:
+ fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit)
+ case _Operator:
+ fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec)
+ default:
+ fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok)
+ }
+ }
+}
+
+func TestEmbeddedTokens(t *testing.T) {
+ // make source
+ var buf bytes.Buffer
+ for i, s := range sampleTokens {
+ buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation
+ buf.WriteString(s.src) // token
+ buf.WriteString("        "[:i&7]) // trailing spaces
+ buf.WriteString(fmt.Sprintf("/*line foo:%d */ // bar\n", i)) // comments + newline (don't crash w/o directive handler)
+ }
+
+ // scan source
+ var got scanner
+ var src string
+ got.init(&buf, func(line, col uint, msg string) {
+ t.Fatalf("%s:%d:%d: %s", src, line, col, msg)
+ }, 0)
+ got.next()
+ for i, want := range sampleTokens {
+ src = want.src
+ nlsemi := false
+
+ if got.line-linebase != uint(i) {
+ t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i)
+ }
+
+ if got.tok != want.tok {
+ t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok)
+ continue
+ }
+
+ switch want.tok {
+ case _Semi:
+ if got.lit != "semicolon" {
+ t.Errorf("%s: got %s; want semicolon", src, got.lit)
+ }
+
+ case _Name, _Literal:
+ if got.lit != want.src {
+ t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src)
+ continue
+ }
+ nlsemi = true
+
+ case _Operator, _AssignOp, _IncOp:
+ if got.op != want.op {
+ t.Errorf("%s: got op %s; want %s", src, got.op, want.op)
+ continue
+ }
+ if got.prec != want.prec {
+ t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec)
+ continue
+ }
+ nlsemi = want.tok == _IncOp
+
+ case _Rparen, _Rbrack, _Rbrace, _Break, _Continue, _Fallthrough, _Return:
+ nlsemi = true
+ }
+
+ if nlsemi {
+ got.next()
+ if got.tok != _Semi {
+ t.Errorf("%s: got tok %s; want ;", src, got.tok)
+ continue
+ }
+ if got.lit != "newline" {
+ t.Errorf("%s: got %s; want newline", src, got.lit)
+ }
+ }
+
+ got.next()
+ }
+
+ if got.tok != _EOF {
+ t.Errorf("got %q; want _EOF", got.tok)
+ }
+}
+
+var sampleTokens = [...]struct {
+ tok token
+ src string
+ op Operator
+ prec int
+}{
+ // name samples
+ {_Name, "x", 0, 0},
+ {_Name, "X123", 0, 0},
+ {_Name, "foo", 0, 0},
+ {_Name, "Foo123", 0, 0},
+ {_Name, "foo_bar", 0, 0},
+ {_Name, "_", 0, 0},
+ {_Name, "_foobar", 0, 0},
+ {_Name, "a۰۱۸", 0, 0},
+ {_Name, "foo६४", 0, 0},
+ {_Name, "bar9876", 0, 0},
+ {_Name, "ŝ", 0, 0},
+ {_Name, "ŝfoo", 0, 0},
+
+ // literal samples
+ {_Literal, "0", 0, 0},
+ {_Literal, "1", 0, 0},
+ {_Literal, "12345", 0, 0},
+ {_Literal, "123456789012345678890123456789012345678890", 0, 0},
+ {_Literal, "01234567", 0, 0},
+ {_Literal, "0_1_234_567", 0, 0},
+ {_Literal, "0X0", 0, 0},
+ {_Literal, "0xcafebabe", 0, 0},
+ {_Literal, "0x_cafe_babe", 0, 0},
+ {_Literal, "0O0", 0, 0},
+ {_Literal, "0o000", 0, 0},
+ {_Literal, "0o_000", 0, 0},
+ {_Literal, "0B1", 0, 0},
+ {_Literal, "0b01100110", 0, 0},
+ {_Literal, "0b_0110_0110", 0, 0},
+ {_Literal, "0.", 0, 0},
+ {_Literal, "0.e0", 0, 0},
+ {_Literal, "0.e-1", 0, 0},
+ {_Literal, "0.e+123", 0, 0},
+ {_Literal, ".0", 0, 0},
+ {_Literal, ".0E00", 0, 0},
+ {_Literal, ".0E-0123", 0, 0},
+ {_Literal, ".0E+12345678901234567890", 0, 0},
+ {_Literal, ".45e1", 0, 0},
+ {_Literal, "3.14159265", 0, 0},
+ {_Literal, "1e0", 0, 0},
+ {_Literal, "1e+100", 0, 0},
+ {_Literal, "1e-100", 0, 0},
+ {_Literal, "2.71828e-1000", 0, 0},
+ {_Literal, "0i", 0, 0},
+ {_Literal, "1i", 0, 0},
+ {_Literal, "012345678901234567889i", 0, 0},
+ {_Literal, "123456789012345678890i", 0, 0},
+ {_Literal, "0.i", 0, 0},
+ {_Literal, ".0i", 0, 0},
+ {_Literal, "3.14159265i", 0, 0},
+ {_Literal, "1e0i", 0, 0},
+ {_Literal, "1e+100i", 0, 0},
+ {_Literal, "1e-100i", 0, 0},
+ {_Literal, "2.71828e-1000i", 0, 0},
+ {_Literal, "'a'", 0, 0},
+ {_Literal, "'\\000'", 0, 0},
+ {_Literal, "'\\xFF'", 0, 0},
+ {_Literal, "'\\uff16'", 0, 0},
+ {_Literal, "'\\U0000ff16'", 0, 0},
+ {_Literal, "`foobar`", 0, 0},
+ {_Literal, "`foo\tbar`", 0, 0},
+ {_Literal, "`\r`", 0, 0},
+
+ // operators
+ {_Operator, "||", OrOr, precOrOr},
+
+ {_Operator, "&&", AndAnd, precAndAnd},
+
+ {_Operator, "==", Eql, precCmp},
+ {_Operator, "!=", Neq, precCmp},
+ {_Operator, "<", Lss, precCmp},
+ {_Operator, "<=", Leq, precCmp},
+ {_Operator, ">", Gtr, precCmp},
+ {_Operator, ">=", Geq, precCmp},
+
+ {_Operator, "+", Add, precAdd},
+ {_Operator, "-", Sub, precAdd},
+ {_Operator, "|", Or, precAdd},
+ {_Operator, "^", Xor, precAdd},
+
+ {_Star, "*", Mul, precMul},
+ {_Operator, "/", Div, precMul},
+ {_Operator, "%", Rem, precMul},
+ {_Operator, "&", And, precMul},
+ {_Operator, "&^", AndNot, precMul},
+ {_Operator, "<<", Shl, precMul},
+ {_Operator, ">>", Shr, precMul},
+
+ // assignment operations
+ {_AssignOp, "+=", Add, precAdd},
+ {_AssignOp, "-=", Sub, precAdd},
+ {_AssignOp, "|=", Or, precAdd},
+ {_AssignOp, "^=", Xor, precAdd},
+
+ {_AssignOp, "*=", Mul, precMul},
+ {_AssignOp, "/=", Div, precMul},
+ {_AssignOp, "%=", Rem, precMul},
+ {_AssignOp, "&=", And, precMul},
+ {_AssignOp, "&^=", AndNot, precMul},
+ {_AssignOp, "<<=", Shl, precMul},
+ {_AssignOp, ">>=", Shr, precMul},
+
+ // other operations
+ {_IncOp, "++", Add, precAdd},
+ {_IncOp, "--", Sub, precAdd},
+ {_Assign, "=", 0, 0},
+ {_Define, ":=", 0, 0},
+ {_Arrow, "<-", 0, 0},
+
+ // delimiters
+ {_Lparen, "(", 0, 0},
+ {_Lbrack, "[", 0, 0},
+ {_Lbrace, "{", 0, 0},
+ {_Rparen, ")", 0, 0},
+ {_Rbrack, "]", 0, 0},
+ {_Rbrace, "}", 0, 0},
+ {_Comma, ",", 0, 0},
+ {_Semi, ";", 0, 0},
+ {_Colon, ":", 0, 0},
+ {_Dot, ".", 0, 0},
+ {_DotDotDot, "...", 0, 0},
+
+ // keywords
+ {_Break, "break", 0, 0},
+ {_Case, "case", 0, 0},
+ {_Chan, "chan", 0, 0},
+ {_Const, "const", 0, 0},
+ {_Continue, "continue", 0, 0},
+ {_Default, "default", 0, 0},
+ {_Defer, "defer", 0, 0},
+ {_Else, "else", 0, 0},
+ {_Fallthrough, "fallthrough", 0, 0},
+ {_For, "for", 0, 0},
+ {_Func, "func", 0, 0},
+ {_Go, "go", 0, 0},
+ {_Goto, "goto", 0, 0},
+ {_If, "if", 0, 0},
+ {_Import, "import", 0, 0},
+ {_Interface, "interface", 0, 0},
+ {_Map, "map", 0, 0},
+ {_Package, "package", 0, 0},
+ {_Range, "range", 0, 0},
+ {_Return, "return", 0, 0},
+ {_Select, "select", 0, 0},
+ {_Struct, "struct", 0, 0},
+ {_Switch, "switch", 0, 0},
+ {_Type, "type", 0, 0},
+ {_Var, "var", 0, 0},
+}
+
+func TestComments(t *testing.T) {
+ type comment struct {
+ line, col uint // 0-based
+ text string
+ }
+
+ for _, test := range []struct {
+ src string
+ want comment
+ }{
+ // no comments
+ {"no comment here", comment{0, 0, ""}},
+ {" /", comment{0, 0, ""}},
+ {"\n /*/", comment{0, 0, ""}},
+
+ //-style comments
+ {"// line comment\n", comment{0, 0, "// line comment"}},
+ {"package p // line comment\n", comment{0, 10, "// line comment"}},
+ {"//\n//\n\t// want this one\r\n", comment{2, 1, "// want this one\r"}},
+ {"\n\n//\n", comment{2, 0, "//"}},
+ {"//", comment{0, 0, "//"}},
+
+ /*-style comments */
+ {"123/* regular comment */", comment{0, 3, "/* regular comment */"}},
+ {"package p /* regular comment", comment{0, 0, ""}},
+ {"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}},
+ {"\n\n/**/", comment{2, 0, "/**/"}},
+ {"/*", comment{0, 0, ""}},
+ } {
+ var s scanner
+ var got comment
+ s.init(strings.NewReader(test.src), func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ // error
+ if msg != "comment not terminated" {
+ t.Errorf("%q: %s", test.src, msg)
+ }
+ return
+ }
+ got = comment{line - linebase, col - colbase, msg} // keep last one
+ }, comments)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ want := test.want
+ if got.line != want.line || got.col != want.col {
+ t.Errorf("%q: got position %d:%d; want %d:%d", test.src, got.line, got.col, want.line, want.col)
+ }
+ if got.text != want.text {
+ t.Errorf("%q: got %q; want %q", test.src, got.text, want.text)
+ }
+ }
+}
+
+func TestNumbers(t *testing.T) {
+ for _, test := range []struct {
+ kind LitKind
+ src, tokens, err string
+ }{
+ // binaries
+ {IntLit, "0b0", "0b0", ""},
+ {IntLit, "0b1010", "0b1010", ""},
+ {IntLit, "0B1110", "0B1110", ""},
+
+ {IntLit, "0b", "0b", "binary literal has no digits"},
+ {IntLit, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
+ {IntLit, "0b01a0", "0b01 a0", ""}, // only accept 0-9
+
+ {FloatLit, "0b.", "0b.", "invalid radix point in binary literal"},
+ {FloatLit, "0b.1", "0b.1", "invalid radix point in binary literal"},
+ {FloatLit, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
+ {FloatLit, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
+ {FloatLit, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0b10i", "0b10i", ""},
+ {ImagLit, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},
+
+ // octals
+ {IntLit, "0o0", "0o0", ""},
+ {IntLit, "0o1234", "0o1234", ""},
+ {IntLit, "0O1234", "0O1234", ""},
+
+ {IntLit, "0o", "0o", "octal literal has no digits"},
+ {IntLit, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
+ {IntLit, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
+ {IntLit, "0o12a3", "0o12 a3", ""}, // only accept 0-9
+
+ {FloatLit, "0o.", "0o.", "invalid radix point in octal literal"},
+ {FloatLit, "0o.2", "0o.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
+ {FloatLit, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0o10i", "0o10i", ""},
+ {ImagLit, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},
+
+ // 0-octals
+ {IntLit, "0", "0", ""},
+ {IntLit, "0123", "0123", ""},
+
+ {IntLit, "08123", "08123", "invalid digit '8' in octal literal"},
+ {IntLit, "01293", "01293", "invalid digit '9' in octal literal"},
+ {IntLit, "0F.", "0 F .", ""}, // only accept 0-9
+ {IntLit, "0123F.", "0123 F .", ""},
+ {IntLit, "0123456x", "0123456 x", ""},
+
+ // decimals
+ {IntLit, "1", "1", ""},
+ {IntLit, "1234", "1234", ""},
+
+ {IntLit, "1f", "1 f", ""}, // only accept 0-9
+
+ {ImagLit, "0i", "0i", ""},
+ {ImagLit, "0678i", "0678i", ""},
+
+ // decimal floats
+ {FloatLit, "0.", "0.", ""},
+ {FloatLit, "123.", "123.", ""},
+ {FloatLit, "0123.", "0123.", ""},
+
+ {FloatLit, ".0", ".0", ""},
+ {FloatLit, ".123", ".123", ""},
+ {FloatLit, ".0123", ".0123", ""},
+
+ {FloatLit, "0.0", "0.0", ""},
+ {FloatLit, "123.123", "123.123", ""},
+ {FloatLit, "0123.0123", "0123.0123", ""},
+
+ {FloatLit, "0e0", "0e0", ""},
+ {FloatLit, "123e+0", "123e+0", ""},
+ {FloatLit, "0123E-1", "0123E-1", ""},
+
+ {FloatLit, "0.e+1", "0.e+1", ""},
+ {FloatLit, "123.E-10", "123.E-10", ""},
+ {FloatLit, "0123.e123", "0123.e123", ""},
+
+ {FloatLit, ".0e-1", ".0e-1", ""},
+ {FloatLit, ".123E+10", ".123E+10", ""},
+ {FloatLit, ".0123E123", ".0123E123", ""},
+
+ {FloatLit, "0.0e1", "0.0e1", ""},
+ {FloatLit, "123.123E-10", "123.123E-10", ""},
+ {FloatLit, "0123.0123e+456", "0123.0123e+456", ""},
+
+ {FloatLit, "0e", "0e", "exponent has no digits"},
+ {FloatLit, "0E+", "0E+", "exponent has no digits"},
+ {FloatLit, "1e+f", "1e+ f", "exponent has no digits"},
+ {FloatLit, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
+ {FloatLit, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0.i", "0.i", ""},
+ {ImagLit, ".123i", ".123i", ""},
+ {ImagLit, "123.123i", "123.123i", ""},
+ {ImagLit, "123e+0i", "123e+0i", ""},
+ {ImagLit, "123.E-10i", "123.E-10i", ""},
+ {ImagLit, ".123E+10i", ".123E+10i", ""},
+
+ // hexadecimals
+ {IntLit, "0x0", "0x0", ""},
+ {IntLit, "0x1234", "0x1234", ""},
+ {IntLit, "0xcafef00d", "0xcafef00d", ""},
+ {IntLit, "0XCAFEF00D", "0XCAFEF00D", ""},
+
+ {IntLit, "0x", "0x", "hexadecimal literal has no digits"},
+ {IntLit, "0x1g", "0x1 g", ""},
+
+ {ImagLit, "0xf00i", "0xf00i", ""},
+
+ // hexadecimal floats
+ {FloatLit, "0x0p0", "0x0p0", ""},
+ {FloatLit, "0x12efp-123", "0x12efp-123", ""},
+ {FloatLit, "0xABCD.p+0", "0xABCD.p+0", ""},
+ {FloatLit, "0x.0189P-0", "0x.0189P-0", ""},
+ {FloatLit, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
+
+ {FloatLit, "0x.", "0x.", "hexadecimal literal has no digits"},
+ {FloatLit, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x0p", "0x0p", "exponent has no digits"},
+ {FloatLit, "0xeP-", "0xeP-", "exponent has no digits"},
+ {FloatLit, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
+ {FloatLit, "0x1.2p1a", "0x1.2p1 a", ""},
+
+ {ImagLit, "0xf00.bap+12i", "0xf00.bap+12i", ""},
+
+ // separators
+ {IntLit, "0b_1000_0001", "0b_1000_0001", ""},
+ {IntLit, "0o_600", "0o_600", ""},
+ {IntLit, "0_466", "0_466", ""},
+ {IntLit, "1_000", "1_000", ""},
+ {FloatLit, "1_000.000_1", "1_000.000_1", ""},
+ {ImagLit, "10e+1_2_3i", "10e+1_2_3i", ""},
+ {IntLit, "0x_f00d", "0x_f00d", ""},
+ {FloatLit, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
+
+ {IntLit, "0b__1000", "0b__1000", "'_' must separate successive digits"},
+ {IntLit, "0o60___0", "0o60___0", "'_' must separate successive digits"},
+ {IntLit, "0466_", "0466_", "'_' must separate successive digits"},
+ {FloatLit, "1_.", "1_.", "'_' must separate successive digits"},
+ {FloatLit, "0._1", "0._1", "'_' must separate successive digits"},
+ {FloatLit, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
+ {ImagLit, "10e+12_i", "10e+12_i", "'_' must separate successive digits"},
+ {IntLit, "0x___0", "0x___0", "'_' must separate successive digits"},
+ {FloatLit, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
+ } {
+ var s scanner
+ var err string
+ s.init(strings.NewReader(test.src), func(_, _ uint, msg string) {
+ if err == "" {
+ err = msg
+ }
+ }, 0)
+
+ for i, want := range strings.Split(test.tokens, " ") {
+ err = ""
+ s.next()
+
+ if err != "" && !s.bad {
+ t.Errorf("%q: got error but bad not set", test.src)
+ }
+
+ // compute lit where s.lit is not defined
+ var lit string
+ switch s.tok {
+ case _Name, _Literal:
+ lit = s.lit
+ case _Dot:
+ lit = "."
+ }
+
+ if i == 0 {
+ if s.tok != _Literal || s.kind != test.kind {
+ t.Errorf("%q: got token %s (kind = %d); want literal (kind = %d)", test.src, s.tok, s.kind, test.kind)
+ }
+ if err != test.err {
+ t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
+ }
+ }
+
+ if lit != want {
+ t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, s.tok, want)
+ }
+ }
+
+ // make sure we read all
+ s.next()
+ if s.tok == _Semi {
+ s.next()
+ }
+ if s.tok != _EOF {
+ t.Errorf("%q: got %s; want EOF", test.src, s.tok)
+ }
+ }
+}
+
+func TestScanErrors(t *testing.T) {
+ for _, test := range []struct {
+ src, err string
+ line, col uint // 0-based
+ }{
+ // Note: Positions for lexical errors are the earliest position
+ // where the error is apparent, not the beginning of the respective
+ // token.
+
+ // rune-level errors
+ {"fo\x00o", "invalid NUL character", 0, 2},
+ {"foo\n\ufeff bar", "invalid BOM in the middle of the file", 1, 0},
+ {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0},
+
+ // token-level errors
+ {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0},
+ {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */},
+ {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0},
+ {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */},
+
+ {"x + ~y", "invalid character U+007E '~'", 0, 4},
+ {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3},
+ {"0123456789", "invalid digit '8' in octal literal", 0, 8},
+ {"0123456789. /* foobar", "comment not terminated", 0, 12}, // valid float constant
+ {"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant
+ {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12},
+
+ {`''`, "empty rune literal or unescaped '", 0, 1},
+ {"'\n", "newline in rune literal", 0, 1},
+ {`'\`, "rune literal not terminated", 0, 0},
+ {`'\'`, "rune literal not terminated", 0, 0},
+ {`'\x`, "rune literal not terminated", 0, 0},
+ {`'\x'`, "invalid character '\\'' in hexadecimal escape", 0, 3},
+ {`'\y'`, "unknown escape", 0, 2},
+ {`'\x0'`, "invalid character '\\'' in hexadecimal escape", 0, 4},
+ {`'\00'`, "invalid character '\\'' in octal escape", 0, 4},
+ {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`'\378`, "invalid character '8' in octal escape", 0, 4},
+ {`'\400'`, "octal escape value 256 > 255", 0, 5},
+ {`'xx`, "rune literal not terminated", 0, 0},
+ {`'xx'`, "more than one character in rune literal", 0, 0},
+
+ {"\n \"foo\n", "newline in string", 1, 7},
+ {`"`, "string not terminated", 0, 0},
+ {`"foo`, "string not terminated", 0, 0},
+ {"`", "string not terminated", 0, 0},
+ {"`foo", "string not terminated", 0, 0},
+ {"/*/", "comment not terminated", 0, 0},
+ {"/*\n\nfoo", "comment not terminated", 0, 0},
+ {`"\`, "string not terminated", 0, 0},
+ {`"\"`, "string not terminated", 0, 0},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`"\y"`, "unknown escape", 0, 2},
+ {`"\x0"`, "invalid character '\"' in hexadecimal escape", 0, 4},
+ {`"\00"`, "invalid character '\"' in octal escape", 0, 4},
+ {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`"\378"`, "invalid character '8' in octal escape", 0, 4},
+ {`"\400"`, "octal escape value 256 > 255", 0, 5},
+
+ {`s := "foo\z"`, "unknown escape", 0, 10},
+ {`s := "foo\z00\nbar"`, "unknown escape", 0, 10},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`var s string = "\x"`, "invalid character '\"' in hexadecimal escape", 0, 18},
+ {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18},
+
+ {"0b.0", "invalid radix point in binary literal", 0, 2},
+ {"0x.p0\n", "hexadecimal literal has no digits", 0, 3},
+
+ // former problem cases
+ {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0},
+ } {
+ var s scanner
+ var line, col uint
+ var err string
+ s.init(strings.NewReader(test.src), func(l, c uint, msg string) {
+ if err == "" {
+ line, col = l-linebase, c-colbase
+ err = msg
+ }
+ }, 0)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ if err != "" {
+ if err != test.err {
+ t.Errorf("%q: got err = %q; want %q", test.src, err, test.err)
+ }
+ if line != test.line {
+ t.Errorf("%q: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col != test.col {
+ t.Errorf("%q: got col = %d; want %d", test.src, col, test.col)
+ }
+ } else {
+ t.Errorf("%q: got no error; want %q", test.src, test.err)
+ }
+ }
+}
+
+func TestDirectives(t *testing.T) {
+ for _, src := range []string{
+ "line",
+ "// line",
+ "//line",
+ "//line foo",
+ "//line foo%bar",
+
+ "go",
+ "// go:",
+ "//go:",
+ "//go :foo",
+ "//go:foo",
+ "//go:foo%bar",
+ } {
+ got := ""
+ var s scanner
+ s.init(strings.NewReader(src), func(_, col uint, msg string) {
+ if col != colbase {
+ t.Errorf("%s: got col = %d; want %d", src, col, colbase)
+ }
+ if msg == "" {
+ t.Errorf("%s: handler called with empty msg", src)
+ }
+ got = msg
+ }, directives)
+
+ s.next()
+ if strings.HasPrefix(src, "//line ") || strings.HasPrefix(src, "//go:") {
+ // handler should have been called
+ if got != src {
+ t.Errorf("got %s; want %s", got, src)
+ }
+ } else {
+ // handler should not have been called
+ if got != "" {
+ t.Errorf("got %s for %s", got, src)
+ }
+ }
+ }
+}
+
+func TestIssue21938(t *testing.T) {
+ s := "/*" + strings.Repeat(" ", 4089) + "*/ .5"
+
+ var got scanner
+ got.init(strings.NewReader(s), errh, 0)
+ got.next()
+
+ if got.tok != _Literal || got.lit != ".5" {
+ t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5")
+ }
+}
+
+func TestIssue33961(t *testing.T) {
+ literals := `08__ 0b.p 0b_._p 0x.e 0x.p`
+ for _, lit := range strings.Split(literals, " ") {
+ n := 0
+ var got scanner
+ got.init(strings.NewReader(lit), func(_, _ uint, msg string) {
+ // fmt.Printf("%s: %s\n", lit, msg) // uncomment for debugging
+ n++
+ }, 0)
+ got.next()
+
+ if n != 1 {
+ t.Errorf("%q: got %d errors; want 1", lit, n)
+ continue
+ }
+
+ if !got.bad {
+ t.Errorf("%q: got error but bad not set", lit)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go
new file mode 100644
index 0000000..01b5921
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/source.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements source, a buffered rune reader
+// specialized for scanning Go code: Reading
+// ASCII characters, maintaining current (line, col)
+// position information, and recording of the most
+// recently read source segment are highly optimized.
+// This file is self-contained (go tool compile source.go
+// compiles) and thus could be made into its own package.
+
+package syntax
+
+import (
+ "io"
+ "unicode/utf8"
+)
+
+// The source buffer is accessed using three indices b (begin),
+// r (read), and e (end):
+//
+// - If b >= 0, it points to the beginning of a segment of most
+// recently read characters (typically a Go literal).
+//
+// - r points to the byte immediately following the most recently
+// read character ch, which starts at r-chw.
+//
+// - e points to the byte immediately following the last byte that
+// was read into the buffer.
+//
+// The buffer content is terminated at buf[e] with the sentinel
+// character utf8.RuneSelf. This makes it possible to test for
+// the common case of ASCII characters with a single 'if' (see
+// nextch method).
+//
+//                +------ content in use -------+
+//                v                             v
+// buf [...read...|...segment...|ch|...unread...|s|...free...]
+//                ^             ^  ^            ^
+//                |             |  |            |
+//                b         r-chw  r            e
+//
+// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel
+
+type source struct {
+ in io.Reader
+ errh func(line, col uint, msg string)
+
+ buf []byte // source buffer
+ ioerr error // pending I/O error, or nil
+ b, r, e int // buffer indices (see comment above)
+ line, col uint // source position of ch (0-based)
+ ch rune // most recently read character
+ chw int // width of ch
+}
+
+const sentinel = utf8.RuneSelf
+
+func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) {
+ s.in = in
+ s.errh = errh
+
+ if s.buf == nil {
+ s.buf = make([]byte, nextSize(0))
+ }
+ s.buf[0] = sentinel
+ s.ioerr = nil
+ s.b, s.r, s.e = -1, 0, 0
+ s.line, s.col = 0, 0
+ s.ch = ' '
+ s.chw = 0
+}
+
+// starting points for line and column numbers
+const linebase = 1
+const colbase = 1
+
+// pos returns the (line, col) source position of s.ch.
+func (s *source) pos() (line, col uint) {
+ return linebase + s.line, colbase + s.col
+}
+
+// error reports the error msg at source position s.pos().
+func (s *source) error(msg string) {
+ line, col := s.pos()
+ s.errh(line, col, msg)
+}
+
+// start starts a new active source segment (including s.ch).
+// As long as stop has not been called, the active segment's
+// bytes (excluding s.ch) may be retrieved by calling segment.
+func (s *source) start() { s.b = s.r - s.chw }
+func (s *source) stop() { s.b = -1 }
+func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] }
+
+// rewind rewinds the scanner's read position and character s.ch
+// to the start of the currently active segment, which must not
+// contain any newlines (otherwise position information will be
+// incorrect). Currently, rewind is only needed for handling the
+// source sequence ".."; it must not be called outside an active
+// segment.
+func (s *source) rewind() {
+ // ok to verify precondition - rewind is rarely called
+ if s.b < 0 {
+ panic("no active segment")
+ }
+ s.col -= uint(s.r - s.b)
+ s.r = s.b
+ s.nextch()
+}
+
+func (s *source) nextch() {
+redo:
+ s.col += uint(s.chw)
+ if s.ch == '\n' {
+ s.line++
+ s.col = 0
+ }
+
+ // fast common case: at least one ASCII character
+ if s.ch = rune(s.buf[s.r]); s.ch < sentinel {
+ s.r++
+ s.chw = 1
+ if s.ch == 0 {
+ s.error("invalid NUL character")
+ goto redo
+ }
+ return
+ }
+
+ // slower general case: add more bytes to buffer if we don't have a full rune
+ for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil {
+ s.fill()
+ }
+
+ // EOF
+ if s.r == s.e {
+ if s.ioerr != io.EOF {
+ // ensure we never start with a '/' (e.g., rooted path) in the error message
+ s.error("I/O error: " + s.ioerr.Error())
+ s.ioerr = nil
+ }
+ s.ch = -1
+ s.chw = 0
+ return
+ }
+
+ s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e])
+ s.r += s.chw
+
+ if s.ch == utf8.RuneError && s.chw == 1 {
+ s.error("invalid UTF-8 encoding")
+ goto redo
+ }
+
+ // BOMs are only allowed as the first character in a file
+ const BOM = 0xfeff
+ if s.ch == BOM {
+ if s.line > 0 || s.col > 0 {
+ s.error("invalid BOM in the middle of the file")
+ }
+ goto redo
+ }
+}
+
+// fill reads more source bytes into s.buf.
+// It returns with at least one more byte in the buffer, or with s.ioerr != nil.
+func (s *source) fill() {
+ // determine content to preserve
+ b := s.r
+ if s.b >= 0 {
+ b = s.b
+ s.b = 0 // after buffer has grown or content has been moved down
+ }
+ content := s.buf[b:s.e]
+
+ // grow buffer or move content down
+ if len(content)*2 > len(s.buf) {
+ s.buf = make([]byte, nextSize(len(s.buf)))
+ copy(s.buf, content)
+ } else if b > 0 {
+ copy(s.buf, content)
+ }
+ s.r -= b
+ s.e -= b
+
+ // read more data: try a limited number of times
+ for i := 0; i < 10; i++ {
+ var n int
+ n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel
+ if n < 0 {
+ panic("negative read") // incorrect underlying io.Reader implementation
+ }
+ if n > 0 || s.ioerr != nil {
+ s.e += n
+ s.buf[s.e] = sentinel
+ return
+ }
+ // n == 0
+ }
+
+ s.buf[s.e] = sentinel
+ s.ioerr = io.ErrNoProgress
+}
+
+// nextSize returns the next bigger size for a buffer of a given size.
+func nextSize(size int) int {
+ const min = 4 << 10 // 4K: minimum buffer size
+ const max = 1 << 20 // 1M: maximum buffer size which is still doubled
+ if size < min {
+ return min
+ }
+ if size <= max {
+ return size << 1
+ }
+ return size + max
+}
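The sentinel scheme described in the buffer comment above is what lets nextch handle ASCII with a single comparison per byte. A minimal standalone sketch of the same idea (illustrative names only, not this package's API):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        // Terminate the content with utf8.RuneSelf (0x80): any byte below the
        // sentinel is plain ASCII, so the fast path needs just one compare.
        buf := append([]byte("if x"), utf8.RuneSelf)
        r := 0
        for {
            ch := rune(buf[r])
            if ch < utf8.RuneSelf { // fast path: ASCII
                fmt.Printf("read %q\n", ch)
                r++
                continue
            }
            // Slow path: the real reader refills the buffer or decodes UTF-8 here;
            // in this sketch the sentinel simply marks the end of the content.
            break
        }
    }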
diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go
new file mode 100644
index 0000000..e51b553
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/syntax.go
@@ -0,0 +1,95 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+// Mode describes the parser mode.
+type Mode uint
+
+// Modes supported by the parser.
+const (
+ CheckBranches Mode = 1 << iota // check correct use of labels, break, continue, and goto statements
+)
+
+// Error describes a syntax error. Error implements the error interface.
+type Error struct {
+ Pos Pos
+ Msg string
+}
+
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Msg)
+}
+
+var _ error = Error{} // verify that Error implements error
+
+// An ErrorHandler is called for each error encountered reading a .go file.
+type ErrorHandler func(err error)
+
+// A Pragma value augments a package, import, const, func, type, or var declaration.
+// Its meaning is entirely up to the PragmaHandler,
+// except that nil is used to mean “no pragma seen.”
+type Pragma interface{}
+
+// A PragmaHandler is used to process //go: directives while scanning.
+// It is passed the current pragma value, which starts out being nil,
+// and it returns an updated pragma value.
+// The text is the directive, with the "//" prefix stripped.
+// The current pragma is saved at each package, import, const, func, type, or var
+// declaration, into the File, ImportDecl, ConstDecl, FuncDecl, TypeDecl, or VarDecl node.
+//
+// If text is the empty string, the pragma is being returned
+// to the handler unused, meaning it appeared before a non-declaration.
+// The handler may wish to report an error. In this case, pos is the
+// current parser position, not the position of the pragma itself.
+// Blank specifies whether the line is blank before the pragma.
+type PragmaHandler func(pos Pos, blank bool, text string, current Pragma) Pragma
+
+// Parse parses a single Go source file from src and returns the corresponding
+// syntax tree. If there are errors, Parse will return the first error found,
+// and a possibly partially constructed syntax tree, or nil.
+//
+// If errh != nil, it is called with each error encountered, and Parse will
+// process as much source as possible. In this case, the returned syntax tree
+// is only nil if no correct package clause was found.
+// If errh is nil, Parse will terminate immediately upon encountering the first
+// error, and the returned syntax tree is nil.
+//
+// If pragh != nil, it is called with each pragma encountered.
+//
+func Parse(base *PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) {
+ defer func() {
+ if p := recover(); p != nil {
+ if err, ok := p.(Error); ok {
+ first = err
+ return
+ }
+ panic(p)
+ }
+ }()
+
+ var p parser
+ p.init(base, src, errh, pragh, mode)
+ p.next()
+ return p.fileOrNil(), p.first
+}
+
+// ParseFile behaves like Parse but it reads the source from the named file.
+func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ if errh != nil {
+ errh(err)
+ }
+ return nil, err
+ }
+ defer f.Close()
+ return Parse(NewFileBase(filename), f, errh, pragh, mode)
+}
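A hedged usage sketch of the entry points above. The package is internal to cmd/compile, so this is illustrative only; the file name, imports, and handler bodies are assumptions, not part of the change:

    // Parse example.go, reporting every error but continuing as far as possible.
    f, err := syntax.ParseFile("example.go",
        func(err error) { fmt.Println(err) }, // ErrorHandler: called for each syntax.Error
        nil,                                  // PragmaHandler: nil means //go: directives are not collected
        syntax.CheckBranches)
    if f == nil {
        log.Fatal(err) // no correct package clause was found
    }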
diff --git a/src/cmd/compile/internal/syntax/testdata/issue20789.src b/src/cmd/compile/internal/syntax/testdata/issue20789.src
new file mode 100644
index 0000000..5f150db
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue20789.src
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure this doesn't crash the compiler.
+// Line 9 must end in EOF for this test (no newline).
+
+package e
+func([<-chan<-[func /* ERROR unexpected u */ u){go /* ERROR must be function call */ \ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23385.src b/src/cmd/compile/internal/syntax/testdata/issue23385.src
new file mode 100644
index 0000000..2459a73
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23385.src
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check error message for use of = instead of == .
+
+package p
+
+func _() {
+ if true || 0 /* ERROR cannot use assignment .* as value */ = 1 {
+ }
+}
+
+func _(a, b string) {
+ if a == "a" && b /* ERROR cannot use assignment .* as value */ = "b" {
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23434.src b/src/cmd/compile/internal/syntax/testdata/issue23434.src
new file mode 100644
index 0000000..5a72a7f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23434.src
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 23434: Better synchronization of
+// parser after missing type. There should be exactly
+// one error each time, with no follow-on errors.
+
+package p
+
+type T /* ERROR unexpected newline */
+
+type Map map[int] /* ERROR unexpected newline */
+
+// Examples from #23434:
+
+func g() {
+ m := make(map[string] /* ERROR unexpected ! */ !)
+ for {
+ x := 1
+ print(x)
+ }
+}
+
+func f() {
+ m := make(map[string] /* ERROR unexpected \) */ )
+ for {
+ x := 1
+ print(x)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue31092.src b/src/cmd/compile/internal/syntax/testdata/issue31092.src
new file mode 100644
index 0000000..b1839b8
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue31092.src
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for issue 31092: Better synchronization of
+// parser after seeing an := rather than an = in a const,
+// type, or variable declaration.
+
+package p
+
+const _ /* ERROR unexpected := */ := 0
+type _ /* ERROR unexpected := */ := int
+var _ /* ERROR unexpected := */ := 0
+
+const _ int /* ERROR unexpected := */ := 0
+var _ int /* ERROR unexpected := */ := 0
diff --git a/src/cmd/compile/internal/syntax/testdata/sample.src b/src/cmd/compile/internal/syntax/testdata/sample.src
new file mode 100644
index 0000000..5a2b4bf
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/sample.src
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a sample test file illustrating the use
+// of error comments with the error test harness.
+
+package p
+
+// The following are invalid error comments; they are
+// silently ignored. The prefix must be exactly one of
+// "/* ERROR " or "// ERROR ".
+//
+/*ERROR*/
+/*ERROR foo*/
+/* ERRORfoo */
+/* ERROR foo */
+//ERROR
+// ERROR
+// ERRORfoo
+// ERROR foo
+
+// This is a valid error comment; it applies to the
+// immediately following token.
+import "math" /* ERROR unexpected comma */ ,
+
+// If there are multiple /*-style error comments before
+// the next token, only the last one is considered.
+type x = /* ERROR ignored */ /* ERROR literal 0 in type declaration */ 0
+
+// A //-style error comment matches any error position
+// on the same line.
+func () foo() // ERROR method has no receiver
diff --git a/src/cmd/compile/internal/syntax/token_string.go b/src/cmd/compile/internal/syntax/token_string.go
new file mode 100644
index 0000000..3cf5473
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/token_string.go
@@ -0,0 +1,17 @@
+// Code generated by "stringer -type token -linecomment"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar"
+
+var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171}
+
+func (i token) String() string {
+ i -= 1
+ if i >= token(len(_token_index)-1) {
+ return "token(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _token_name[_token_index[i]:_token_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
new file mode 100644
index 0000000..3b97cb6
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -0,0 +1,156 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+type token uint
+
+//go:generate stringer -type token -linecomment
+
+const (
+ _ token = iota
+ _EOF // EOF
+
+ // names and literals
+ _Name // name
+ _Literal // literal
+
+ // operators and operations
+ // _Operator is excluding '*' (_Star)
+ _Operator // op
+ _AssignOp // op=
+ _IncOp // opop
+ _Assign // =
+ _Define // :=
+ _Arrow // <-
+ _Star // *
+
+ // delimiters
+ _Lparen // (
+ _Lbrack // [
+ _Lbrace // {
+ _Rparen // )
+ _Rbrack // ]
+ _Rbrace // }
+ _Comma // ,
+ _Semi // ;
+ _Colon // :
+ _Dot // .
+ _DotDotDot // ...
+
+ // keywords
+ _Break // break
+ _Case // case
+ _Chan // chan
+ _Const // const
+ _Continue // continue
+ _Default // default
+ _Defer // defer
+ _Else // else
+ _Fallthrough // fallthrough
+ _For // for
+ _Func // func
+ _Go // go
+ _Goto // goto
+ _If // if
+ _Import // import
+ _Interface // interface
+ _Map // map
+ _Package // package
+ _Range // range
+ _Return // return
+ _Select // select
+ _Struct // struct
+ _Switch // switch
+ _Type // type
+ _Var // var
+
+ // empty line comment to exclude it from .String
+ tokenCount //
+)
+
+const (
+ // for BranchStmt
+ Break = _Break
+ Continue = _Continue
+ Fallthrough = _Fallthrough
+ Goto = _Goto
+
+ // for CallStmt
+ Go = _Go
+ Defer = _Defer
+)
+
+// Make sure we have at most 64 tokens so we can use them in a set.
+const _ uint64 = 1 << (tokenCount - 1)
+
+// contains reports whether tok is in tokset.
+func contains(tokset uint64, tok token) bool {
+ return tokset&(1<<tok) != 0
+}
+
+type LitKind uint8
+
+// TODO(gri) With the 'i' (imaginary) suffix now permitted on integer
+// and floating-point numbers, having a single ImagLit does
+// not represent the literal kind well anymore. Remove it?
+const (
+ IntLit LitKind = iota
+ FloatLit
+ ImagLit
+ RuneLit
+ StringLit
+)
+
+type Operator uint
+
+//go:generate stringer -type Operator -linecomment
+
+const (
+ _ Operator = iota
+
+ // Def is the : in :=
+ Def // :
+ Not // !
+ Recv // <-
+
+ // precOrOr
+ OrOr // ||
+
+ // precAndAnd
+ AndAnd // &&
+
+ // precCmp
+ Eql // ==
+ Neq // !=
+ Lss // <
+ Leq // <=
+ Gtr // >
+ Geq // >=
+
+ // precAdd
+ Add // +
+ Sub // -
+ Or // |
+ Xor // ^
+
+ // precMul
+ Mul // *
+ Div // /
+ Rem // %
+ And // &
+ AndNot // &^
+ Shl // <<
+ Shr // >>
+)
+
+// Operator precedences
+const (
+ _ = iota
+ precOrOr
+ precAndAnd
+ precCmp
+ precAdd
+ precMul
+)
diff --git a/src/cmd/compile/internal/test/README b/src/cmd/compile/internal/test/README
new file mode 100644
index 0000000..242ff79
--- /dev/null
+++ b/src/cmd/compile/internal/test/README
@@ -0,0 +1,4 @@
+This directory holds small tests and benchmarks of code
+generated by the compiler. This code is not for importing,
+and the tests are intended to verify that specific optimizations
+are applied and correct.
diff --git a/src/cmd/compile/internal/test/divconst_test.go b/src/cmd/compile/internal/test/divconst_test.go
new file mode 100644
index 0000000..9358a60
--- /dev/null
+++ b/src/cmd/compile/internal/test/divconst_test.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var boolres bool
+
+var i64res int64
+
+func BenchmarkDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ }
+}
+
+func BenchmarkModconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%16 == 0
+ }
+}
+func BenchmarkDivisibleconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ boolres = int64(i)%7 == 0
+ }
+}
+
+var u64res uint64
+
+func TestDivmodConstU64(t *testing.T) {
+ // Test division by c. Function f must be func(n) { return n/c, n%c }
+ testdiv := func(c uint64, f func(uint64) (uint64, uint64)) func(*testing.T) {
+ return func(t *testing.T) {
+ x := uint64(12345)
+ for i := 0; i < 10000; i++ {
+ x += x << 2
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ max := uint64(1<<64-1) / c * c
+ xs := []uint64{0, 1, c - 1, c, c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ c*c - 1, c * c, c*c + 1, max - 1, max, max + 1, 1<<64 - 1}
+ for _, x := range xs {
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ }
+ }
+ t.Run("2", testdiv(2, func(n uint64) (uint64, uint64) { return n / 2, n % 2 }))
+ t.Run("3", testdiv(3, func(n uint64) (uint64, uint64) { return n / 3, n % 3 }))
+ t.Run("4", testdiv(4, func(n uint64) (uint64, uint64) { return n / 4, n % 4 }))
+ t.Run("5", testdiv(5, func(n uint64) (uint64, uint64) { return n / 5, n % 5 }))
+ t.Run("6", testdiv(6, func(n uint64) (uint64, uint64) { return n / 6, n % 6 }))
+ t.Run("7", testdiv(7, func(n uint64) (uint64, uint64) { return n / 7, n % 7 }))
+ t.Run("8", testdiv(8, func(n uint64) (uint64, uint64) { return n / 8, n % 8 }))
+ t.Run("9", testdiv(9, func(n uint64) (uint64, uint64) { return n / 9, n % 9 }))
+ t.Run("10", testdiv(10, func(n uint64) (uint64, uint64) { return n / 10, n % 10 }))
+ t.Run("11", testdiv(11, func(n uint64) (uint64, uint64) { return n / 11, n % 11 }))
+ t.Run("12", testdiv(12, func(n uint64) (uint64, uint64) { return n / 12, n % 12 }))
+ t.Run("13", testdiv(13, func(n uint64) (uint64, uint64) { return n / 13, n % 13 }))
+ t.Run("14", testdiv(14, func(n uint64) (uint64, uint64) { return n / 14, n % 14 }))
+ t.Run("15", testdiv(15, func(n uint64) (uint64, uint64) { return n / 15, n % 15 }))
+ t.Run("16", testdiv(16, func(n uint64) (uint64, uint64) { return n / 16, n % 16 }))
+ t.Run("17", testdiv(17, func(n uint64) (uint64, uint64) { return n / 17, n % 17 }))
+ t.Run("255", testdiv(255, func(n uint64) (uint64, uint64) { return n / 255, n % 255 }))
+ t.Run("256", testdiv(256, func(n uint64) (uint64, uint64) { return n / 256, n % 256 }))
+ t.Run("257", testdiv(257, func(n uint64) (uint64, uint64) { return n / 257, n % 257 }))
+ t.Run("65535", testdiv(65535, func(n uint64) (uint64, uint64) { return n / 65535, n % 65535 }))
+ t.Run("65536", testdiv(65536, func(n uint64) (uint64, uint64) { return n / 65536, n % 65536 }))
+ t.Run("65537", testdiv(65537, func(n uint64) (uint64, uint64) { return n / 65537, n % 65537 }))
+ t.Run("1<<32-1", testdiv(1<<32-1, func(n uint64) (uint64, uint64) { return n / (1<<32 - 1), n % (1<<32 - 1) }))
+ t.Run("1<<32+1", testdiv(1<<32+1, func(n uint64) (uint64, uint64) { return n / (1<<32 + 1), n % (1<<32 + 1) }))
+ t.Run("1<<64-1", testdiv(1<<64-1, func(n uint64) (uint64, uint64) { return n / (1<<64 - 1), n % (1<<64 - 1) }))
+}
+
+func BenchmarkDivconstU64(b *testing.B) {
+ b.Run("3", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 3
+ }
+ })
+ b.Run("5", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 5
+ }
+ })
+ b.Run("37", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 37
+ }
+ })
+ b.Run("1234567", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 1234567
+ }
+ })
+}
+
+func BenchmarkModconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) / 7
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+var i32res int32
+
+func BenchmarkDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ }
+}
+
+func BenchmarkModconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ boolres = int32(i)%7 == 0
+ }
+}
+
+var u32res uint32
+
+func BenchmarkDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ }
+}
+
+func BenchmarkModconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+var i16res int16
+
+func BenchmarkDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ }
+}
+
+func BenchmarkModconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ boolres = int16(i)%7 == 0
+ }
+}
+
+var u16res uint16
+
+func BenchmarkDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ }
+}
+
+func BenchmarkModconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+var i8res int8
+
+func BenchmarkDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ }
+}
+
+func BenchmarkModconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ boolres = int8(i)%7 == 0
+ }
+}
+
+var u8res uint8
+
+func BenchmarkDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ }
+}
+
+func BenchmarkModconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ boolres = uint8(i)%7 == 0
+ }
+}
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
new file mode 100644
index 0000000..314cab3
--- /dev/null
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -0,0 +1,242 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Benchmark multiplication of an integer by various constants.
+//
+// The comment above each sub-benchmark provides an example of how the
+// target multiplication operation might be implemented using shift
+// (multiplication by a power of 2), addition and subtraction
+// operations. It is platform-dependent whether these transformations
+// are actually applied.
+
+var (
+ mulSinkI32 int32
+ mulSinkI64 int64
+ mulSinkU32 uint32
+ mulSinkU64 uint64
+)
+
+func BenchmarkMulconstI32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI32 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI32 = x
+ })
+}
+
+func BenchmarkMulconstI64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI64 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI64 = x
+ })
+}
+
+func BenchmarkMulconstU32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU32 = x
+ })
+}
+
+func BenchmarkMulconstU64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU64 = x
+ })
+}
diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go
new file mode 100644
index 0000000..56e5404
--- /dev/null
+++ b/src/cmd/compile/internal/test/test.go
@@ -0,0 +1 @@
+package test
diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go
new file mode 100644
index 0000000..14fd5b7
--- /dev/null
+++ b/src/cmd/compile/internal/types/etype_string.go
@@ -0,0 +1,60 @@
+// Code generated by "stringer -type EType -trimprefix T"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Txxx-0]
+ _ = x[TINT8-1]
+ _ = x[TUINT8-2]
+ _ = x[TINT16-3]
+ _ = x[TUINT16-4]
+ _ = x[TINT32-5]
+ _ = x[TUINT32-6]
+ _ = x[TINT64-7]
+ _ = x[TUINT64-8]
+ _ = x[TINT-9]
+ _ = x[TUINT-10]
+ _ = x[TUINTPTR-11]
+ _ = x[TCOMPLEX64-12]
+ _ = x[TCOMPLEX128-13]
+ _ = x[TFLOAT32-14]
+ _ = x[TFLOAT64-15]
+ _ = x[TBOOL-16]
+ _ = x[TPTR-17]
+ _ = x[TFUNC-18]
+ _ = x[TSLICE-19]
+ _ = x[TARRAY-20]
+ _ = x[TSTRUCT-21]
+ _ = x[TCHAN-22]
+ _ = x[TMAP-23]
+ _ = x[TINTER-24]
+ _ = x[TFORW-25]
+ _ = x[TANY-26]
+ _ = x[TSTRING-27]
+ _ = x[TUNSAFEPTR-28]
+ _ = x[TIDEAL-29]
+ _ = x[TNIL-30]
+ _ = x[TBLANK-31]
+ _ = x[TFUNCARGS-32]
+ _ = x[TCHANARGS-33]
+ _ = x[TSSA-34]
+ _ = x[TTUPLE-35]
+ _ = x[TRESULTS-36]
+ _ = x[NTYPE-37]
+}
+
+const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+
+var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
+
+func (i EType) String() string {
+ if i >= EType(len(_EType_index)-1) {
+ return "EType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _EType_name[_EType_index[i]:_EType_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
new file mode 100644
index 0000000..a77f514
--- /dev/null
+++ b/src/cmd/compile/internal/types/identity.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// Identical reports whether t1 and t2 are identical types, following
+// the spec rules. Receiver parameter types are ignored.
+func Identical(t1, t2 *Type) bool {
+ return identical(t1, t2, true, nil)
+}
+
+// IdenticalIgnoreTags is like Identical, but it ignores struct tags
+// for struct identity.
+func IdenticalIgnoreTags(t1, t2 *Type) bool {
+ return identical(t1, t2, false, nil)
+}
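+
+// As a usage sketch (byteSlice, uint8Slice, taggedStruct, and
+// untaggedStruct are hypothetical *Type values built by the frontend):
+//
+//	Identical(byteSlice, uint8Slice)                  // true: byte and uint8 compare equal
+//	Identical(taggedStruct, untaggedStruct)           // false: struct tags differ
+//	IdenticalIgnoreTags(taggedStruct, untaggedStruct) // true: tags ignored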
+
+type typePair struct {
+ t1 *Type
+ t2 *Type
+}
+
+func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
+ if t1 == t2 {
+ return true
+ }
+ if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
+ return false
+ }
+ if t1.Sym != nil || t2.Sym != nil {
+ // Special case: we keep byte/uint8 and rune/int32
+ // separate for error messages. Treat them as equal.
+ switch t1.Etype {
+ case TUINT8:
+ return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype)
+ case TINT32:
+ return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype)
+ default:
+ return false
+ }
+ }
+
+ // Any cyclic type must go through a named type, and if one is
+ // named, it is only identical to the other if they are the
+ // same pointer (t1 == t2), so there's no chance of chasing
+ // cycles ad infinitum, so no need for a depth counter.
+ if assumedEqual == nil {
+ assumedEqual = make(map[typePair]struct{})
+ } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
+ return true
+ }
+ assumedEqual[typePair{t1, t2}] = struct{}{}
+
+ switch t1.Etype {
+ case TIDEAL:
+ // Historically, cmd/compile used a single "untyped
+ // number" type, so all untyped number types were
+ // identical. Match this behavior.
+ // TODO(mdempsky): Revisit this.
+ return true
+
+ case TINTER:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.FieldSlice() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ }
+ return true
+
+ case TSTRUCT:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.FieldSlice() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ if cmpTags && f1.Note != f2.Note {
+ return false
+ }
+ }
+ return true
+
+ case TFUNC:
+ // Check parameters and result parameters for type equality.
+ // We intentionally ignore receiver parameters for type
+ // equality, because they're never relevant.
+ for _, f := range ParamsResults {
+ // Loop over fields in structs, ignoring argument names.
+ fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
+ if len(fs1) != len(fs2) {
+ return false
+ }
+ for i, f1 := range fs1 {
+ f2 := fs2[i]
+ if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) {
+ return false
+ }
+ }
+ }
+ return true
+
+ case TARRAY:
+ if t1.NumElem() != t2.NumElem() {
+ return false
+ }
+
+ case TCHAN:
+ if t1.ChanDir() != t2.ChanDir() {
+ return false
+ }
+
+ case TMAP:
+ if !identical(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
+ return false
+ }
+ }
+
+ return identical(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
+}
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
new file mode 100644
index 0000000..bcc6789
--- /dev/null
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -0,0 +1,146 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// pkgMap maps a package path to a package.
+var pkgMap = make(map[string]*Pkg)
+
+// MaxPkgHeight is a height greater than any likely package height.
+const MaxPkgHeight = 1e9
+
+type Pkg struct {
+ Path string // string literal used in import statement, e.g. "runtime/internal/sys"
+ Name string // package name, e.g. "sys"
+ Prefix string // escaped path for use in symbol table
+ Syms map[string]*Sym
+ Pathsym *obj.LSym
+
+ // Height is the package's height in the import graph. Leaf
+ // packages (i.e., packages with no imports) have height 0,
+ // and all other packages have height 1 plus the maximum
+ // height of their imported packages.
+ Height int
+
+ Imported bool // export data of this package was parsed
+ Direct bool // imported directly
+}
+
+// NewPkg returns a new Pkg for the given package path and name.
+// Unless name is the empty string, if the package exists already,
+// the existing package name and the provided name must match.
+func NewPkg(path, name string) *Pkg {
+ if p := pkgMap[path]; p != nil {
+ if name != "" && p.Name != name {
+ panic(fmt.Sprintf("conflicting package names %s and %s for path %q", p.Name, name, path))
+ }
+ return p
+ }
+
+ p := new(Pkg)
+ p.Path = path
+ p.Name = name
+ p.Prefix = objabi.PathToPrefix(path)
+ p.Syms = make(map[string]*Sym)
+ pkgMap[path] = p
+
+ return p
+}
+
+// ImportedPkgList returns the list of directly imported packages.
+// The list is sorted by package path.
+func ImportedPkgList() []*Pkg {
+ var list []*Pkg
+ for _, p := range pkgMap {
+ if p.Direct {
+ list = append(list, p)
+ }
+ }
+ sort.Sort(byPath(list))
+ return list
+}
+
+type byPath []*Pkg
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+var nopkg = &Pkg{
+ Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+ s, _ := pkg.LookupOK(name)
+ return s
+}
+
+// List of .inittask entries in imported packages, in source code order.
+var InitSyms []*Sym
+
+// LookupOK looks up name in pkg and reports whether it previously existed.
+func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[name]; s != nil {
+ return s, true
+ }
+
+ s = &Sym{
+ Name: name,
+ Pkg: pkg,
+ }
+ if name == ".inittask" {
+ InitSyms = append(InitSyms, s)
+ }
+ pkg.Syms[name] = s
+ return s, false
+}
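+
+// For example (a sketch with a hypothetical package path):
+//
+//	p := NewPkg("example/demo", "demo")
+//	s, existed := p.LookupOK("Foo") // existed == false on first lookup
+//	s2, existed2 := p.LookupOK("Foo")
+//	// s2 == s and existed2 == true: Syms are canonicalized per package.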
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[string(name)]; s != nil {
+ return s
+ }
+ str := InternString(name)
+ return pkg.Lookup(str)
+}
+
+var (
+ internedStringsmu sync.Mutex // protects internedStrings
+ internedStrings = map[string]string{}
+)
+
+func InternString(b []byte) string {
+ internedStringsmu.Lock()
+ s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+ if !ok {
+ s = string(b)
+ internedStrings[s] = s
+ }
+ internedStringsmu.Unlock()
+ return s
+}
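+
+// For example (a sketch), interning the same byte slice twice yields the
+// same string without a second allocation:
+//
+//	b := []byte("sync")
+//	s1 := InternString(b)
+//	s2 := InternString(b) // s2 == s1; the map lookup does not allocate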
+
+// CleanroomDo invokes f in an environment with no preexisting packages.
+// For testing of import/export only.
+func CleanroomDo(f func()) {
+ saved := pkgMap
+ pkgMap = make(map[string]*Pkg)
+ f()
+ pkgMap = saved
+}
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
new file mode 100644
index 0000000..40d3d86
--- /dev/null
+++ b/src/cmd/compile/internal/types/scope.go
@@ -0,0 +1,103 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/internal/src"
+
+// Declaration stack & operations
+
+var blockgen int32 = 1 // max block number
+var Block int32 // current block number
+
+// A dsym stores a symbol's shadowed declaration so that it can be
+// restored once the block scope ends.
+type dsym struct {
+ sym *Sym // sym == nil indicates stack mark
+ def *Node
+ block int32
+ lastlineno src.XPos // last declaration for diagnostic
+}
+
+// dclstack maintains a stack of shadowed symbol declarations so that
+// Popdcl can restore their declarations when a block scope ends.
+var dclstack []dsym
+
+// Pushdcl pushes the current declaration for symbol s (if any) so that
+// it can be shadowed by a new declaration within a nested block scope.
+func Pushdcl(s *Sym) {
+ dclstack = append(dclstack, dsym{
+ sym: s,
+ def: s.Def,
+ block: s.Block,
+ lastlineno: s.Lastlineno,
+ })
+}
+
+// Popdcl pops the innermost block scope and restores all symbol declarations
+// to their previous state.
+func Popdcl() {
+ for i := len(dclstack); i > 0; i-- {
+ d := &dclstack[i-1]
+ s := d.sym
+ if s == nil {
+ // pop stack mark
+ Block = d.block
+ dclstack = dclstack[:i-1]
+ return
+ }
+
+ s.Def = d.def
+ s.Block = d.block
+ s.Lastlineno = d.lastlineno
+
+ // Clear dead pointer fields.
+ d.sym = nil
+ d.def = nil
+ }
+ Fatalf("popdcl: no stack mark")
+}
+
+// Markdcl records the start of a new block scope for declarations.
+func Markdcl() {
+ dclstack = append(dclstack, dsym{
+ sym: nil, // stack mark
+ block: Block,
+ })
+ blockgen++
+ Block = blockgen
+}
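+
+// A typical frontend usage pattern (a sketch; the real callers live in
+// cmd/compile/internal/gc):
+//
+//	Markdcl()   // open a new block scope
+//	Pushdcl(s)  // save s's current declaration before redeclaring it
+//	s.Def = n   // install the new declaration (n is hypothetical)
+//	...
+//	Popdcl()    // close the scope and restore s.Def, s.Block, s.Lastlineno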
+
+func IsDclstackValid() bool {
+ for _, d := range dclstack {
+ if d.sym == nil {
+ return false
+ }
+ }
+ return true
+}
+
+// PkgDef returns the definition associated with s at package scope.
+func (s *Sym) PkgDef() *Node {
+ return *s.pkgDefPtr()
+}
+
+// SetPkgDef sets the definition associated with s at package scope.
+func (s *Sym) SetPkgDef(n *Node) {
+ *s.pkgDefPtr() = n
+}
+
+func (s *Sym) pkgDefPtr() **Node {
+ // Look for outermost saved declaration, which must be the
+ // package scope definition, if present.
+ for _, d := range dclstack {
+ if s == d.sym {
+ return &d.def
+ }
+ }
+
+ // Otherwise, the declaration hasn't been shadowed within a
+ // function scope.
+ return &s.Def
+}
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
new file mode 100644
index 0000000..ea947d8
--- /dev/null
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -0,0 +1,48 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Sym{}, 52, 88},
+ {Type{}, 52, 88},
+ {Map{}, 20, 40},
+ {Forward{}, 20, 32},
+ {Func{}, 32, 56},
+ {Struct{}, 16, 32},
+ {Interface{}, 8, 16},
+ {Chan{}, 8, 16},
+ {Array{}, 12, 16},
+ {FuncArgs{}, 4, 8},
+ {ChanArgs{}, 4, 8},
+ {Ptr{}, 4, 8},
+ {Slice{}, 4, 8},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
new file mode 100644
index 0000000..07bce4d
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym.go
@@ -0,0 +1,142 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Sym represents an object name in a segmented (pkg, name) namespace.
+// Most commonly, this is a Go identifier naming an object declared within a package,
+// but Syms are also used to name internal synthesized objects.
+//
+// As an exception, field and method names that are exported use the Sym
+// associated with localpkg instead of the package that declared them. This
+// allows using Sym pointer equality to test for Go identifier uniqueness when
+// handling selector expressions.
+//
+// Ideally, Sym should be used for representing Go language constructs,
+// while cmd/internal/obj.LSym is used for representing emitted artifacts.
+//
+// NOTE: In practice, things can be messier than the description above
+// for various reasons (historical, convenience).
+type Sym struct {
+ Importdef *Pkg // where imported definition was found
+ Linkname string // link name
+
+ Pkg *Pkg
+ Name string // object name
+
+ // saved and restored by dcopy
+ Def *Node // definition: ONAME OTYPE OPACK or OLITERAL
+ Block int32 // blocknumber to catch redeclaration
+ Lastlineno src.XPos // last declaration for diagnostic
+
+ flags bitset8
+ Label *Node // corresponding label (ephemeral)
+ Origpkg *Pkg // original package for . import
+}
+
+const (
+ symOnExportList = 1 << iota // added to exportlist (no need to add again)
+ symUniq
+ symSiggen // type symbol has been generated
+ symAsm // on asmlist, for writing to -asmhdr
+ symFunc // function symbol; uses internal ABI
+)
+
+func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 }
+func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
+func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
+func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
+func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 }
+
+func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) }
+func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
+func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
+func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
+func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) }
+
+func (sym *Sym) IsBlank() bool {
+ return sym != nil && sym.Name == "_"
+}
+
+func (sym *Sym) LinksymName() string {
+ if sym.IsBlank() {
+ return "_"
+ }
+ if sym.Linkname != "" {
+ return sym.Linkname
+ }
+ return sym.Pkg.Prefix + "." + sym.Name
+}
+
+func (sym *Sym) Linksym() *obj.LSym {
+ if sym == nil {
+ return nil
+ }
+ initPkg := func(r *obj.LSym) {
+ if sym.Linkname != "" {
+ r.Pkg = "_"
+ } else {
+ r.Pkg = sym.Pkg.Prefix
+ }
+ }
+ if sym.Func() {
+ // This is a function symbol. Mark it as "internal ABI".
+ return Ctxt.LookupABIInit(sym.LinksymName(), obj.ABIInternal, initPkg)
+ }
+ return Ctxt.LookupInit(sym.LinksymName(), initPkg)
+}
+
+// Less reports whether symbol a is ordered before symbol b.
+//
+// Symbols are ordered exported before non-exported, then by name, and
+// finally (for non-exported symbols) by package height and path.
+//
+// Ordering by package height is necessary to establish a consistent
+// ordering for non-exported names with the same spelling but from
+// different packages. We don't necessarily know the path for the
+// package being compiled, but by definition it will have a height
+// greater than that of any other package seen within the compilation unit.
+// For more background, see issue #24693.
+func (a *Sym) Less(b *Sym) bool {
+ if a == b {
+ return false
+ }
+
+ // Exported symbols before non-exported.
+ ea := IsExported(a.Name)
+ eb := IsExported(b.Name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package
+ // height and path.
+ if a.Name != b.Name {
+ return a.Name < b.Name
+ }
+ if !ea {
+ if a.Pkg.Height != b.Pkg.Height {
+ return a.Pkg.Height < b.Pkg.Height
+ }
+ return a.Pkg.Path < b.Pkg.Path
+ }
+ return false
+}
+
+// IsExported reports whether name is an exported Go symbol (that is,
+// whether it begins with an upper-case letter).
+func IsExported(name string) bool {
+ if r := name[0]; r < utf8.RuneSelf {
+ return 'A' <= r && r <= 'Z'
+ }
+ r, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(r)
+}
diff --git a/src/cmd/compile/internal/types/sym_test.go b/src/cmd/compile/internal/types/sym_test.go
new file mode 100644
index 0000000..94efd42
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym_test.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "cmd/compile/internal/types"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestSymLess(t *testing.T) {
+ var (
+ local = types.NewPkg("", "")
+ abc = types.NewPkg("abc", "")
+ uvw = types.NewPkg("uvw", "")
+ xyz = types.NewPkg("xyz", "")
+ gr = types.NewPkg("gr", "")
+ )
+
+ data := []*types.Sym{
+ abc.Lookup("b"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ uvw.Lookup("c"),
+ local.Lookup("C"),
+ gr.Lookup("φ"),
+ local.Lookup("Φ"),
+ xyz.Lookup("b"),
+ abc.Lookup("a"),
+ local.Lookup("B"),
+ }
+ want := []*types.Sym{
+ local.Lookup("B"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ local.Lookup("C"),
+ local.Lookup("Φ"),
+ abc.Lookup("a"),
+ abc.Lookup("b"),
+ xyz.Lookup("b"),
+ uvw.Lookup("c"),
+ gr.Lookup("φ"),
+ }
+ if len(data) != len(want) {
+ t.Fatal("want and data must have the same length")
+ }
+ if reflect.DeepEqual(data, want) {
+ t.Fatal("data must be shuffled")
+ }
+ sort.Slice(data, func(i, j int) bool { return data[i].Less(data[j]) })
+ if !reflect.DeepEqual(data, want) {
+ t.Logf("want: %#v", want)
+ t.Logf("data: %#v", data)
+ t.Errorf("sorting failed")
+ }
+}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
new file mode 100644
index 0000000..023ab9a
--- /dev/null
+++ b/src/cmd/compile/internal/types/type.go
@@ -0,0 +1,1525 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Dummy Node so we can refer to *Node without actually
+// having a gc.Node. Necessary to break import cycles.
+// TODO(gri) try to eliminate soon
+type Node struct{ _ int }
+
+//go:generate stringer -type EType -trimprefix T
+
+// EType describes a kind of type.
+type EType uint8
+
+const (
+ Txxx EType = iota
+
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+
+ TCOMPLEX64
+ TCOMPLEX128
+
+ TFLOAT32
+ TFLOAT64
+
+ TBOOL
+
+ TPTR
+ TFUNC
+ TSLICE
+ TARRAY
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TANY
+ TSTRING
+ TUNSAFEPTR
+
+ // pseudo-types for literals
+ TIDEAL // untyped numeric constants
+ TNIL
+ TBLANK
+
+ // pseudo-types for frame layout
+ TFUNCARGS
+ TCHANARGS
+
+ // SSA backend types
+ TSSA // internal types used by SSA backend (flags, memory, etc.)
+ TTUPLE // a pair of types, used by SSA backend
+ TRESULTS // multiple types; the result of calling a function or method, with a memory at the end.
+
+ NTYPE
+)
+
+// ChanDir is whether a channel can send, receive, or both.
+type ChanDir uint8
+
+func (c ChanDir) CanRecv() bool { return c&Crecv != 0 }
+func (c ChanDir) CanSend() bool { return c&Csend != 0 }
+
+const (
+ // types of channel
+ // must match ../../../../reflect/type.go:/ChanDir
+ Crecv ChanDir = 1 << 0
+ Csend ChanDir = 1 << 1
+ Cboth ChanDir = Crecv | Csend
+)
+
+// Types stores pointers to predeclared named types.
+//
+// It also stores pointers to several special types:
+// - Types[TANY] is the placeholder "any" type recognized by substArgTypes.
+// - Types[TBLANK] represents the blank variable's type.
+// - Types[TNIL] represents the predeclared "nil" value's type.
+// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
+var Types [NTYPE]*Type
+
+var (
+ // Predeclared alias types. Kept separate for better error messages.
+ Bytetype *Type
+ Runetype *Type
+
+ // Predeclared error interface type.
+ Errortype *Type
+
+ // Types to represent untyped string and boolean constants.
+ UntypedString *Type
+ UntypedBool *Type
+
+ // Types to represent untyped numeric constants.
+ UntypedInt = New(TIDEAL)
+ UntypedRune = New(TIDEAL)
+ UntypedFloat = New(TIDEAL)
+ UntypedComplex = New(TIDEAL)
+)
+
+// A Type represents a Go type.
+type Type struct {
+ // Extra contains extra etype-specific fields.
+ // As an optimization, those etype-specific structs which contain exactly
+ // one pointer-shaped field are stored as values rather than pointers when possible.
+ //
+ // TMAP: *Map
+ // TFORW: *Forward
+ // TFUNC: *Func
+ // TSTRUCT: *Struct
+ // TINTER: *Interface
+ // TFUNCARGS: FuncArgs
+ // TCHANARGS: ChanArgs
+ // TCHAN: *Chan
+ // TPTR: Ptr
+ // TARRAY: *Array
+ // TSLICE: Slice
+ // TSSA: string
+ Extra interface{}
+
+ // Width is the width of this Type in bytes.
+ Width int64 // valid if Align > 0
+
+ methods Fields
+ allMethods Fields
+
+ Nod *Node // canonical OTYPE node
+ Orig *Type // original type (type literal or predefined type)
+
+ // Cache of composite types, with this type being the element type.
+ Cache struct {
+ ptr *Type // *T, or nil
+ slice *Type // []T, or nil
+ }
+
+ Sym *Sym // symbol containing name, for named types
+ Vargen int32 // unique name for OTYPE/ONAME
+
+ Etype EType // kind of type
+ Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
+
+ flags bitset8
+}
+
+const (
+ typeNotInHeap = 1 << iota // type cannot be heap allocated
+ typeBroke // broken type definition
+ typeNoalg // suppress hash and eq algorithm generation
+ typeDeferwidth // width computation has been deferred and type is on deferredTypeStack
+ typeRecur
+)
+
+func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 }
+func (t *Type) Broke() bool { return t.flags&typeBroke != 0 }
+func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 }
+func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 }
+func (t *Type) Recur() bool { return t.flags&typeRecur != 0 }
+
+func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) }
+func (t *Type) SetBroke(b bool) { t.flags.set(typeBroke, b) }
+func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
+func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
+func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
+
+// Pkg returns the package that t appeared in.
+//
+// Pkg is only defined for function, struct, and interface types
+// (i.e., types with named elements). This information isn't used by
+// cmd/compile itself, but we need to track it because it's exposed by
+// the go/types API.
+func (t *Type) Pkg() *Pkg {
+ switch t.Etype {
+ case TFUNC:
+ return t.Extra.(*Func).pkg
+ case TSTRUCT:
+ return t.Extra.(*Struct).pkg
+ case TINTER:
+ return t.Extra.(*Interface).pkg
+ default:
+ Fatalf("Pkg: unexpected kind: %v", t)
+ return nil
+ }
+}
+
+// SetPkg sets the package that t appeared in.
+func (t *Type) SetPkg(pkg *Pkg) {
+ switch t.Etype {
+ case TFUNC:
+ t.Extra.(*Func).pkg = pkg
+ case TSTRUCT:
+ t.Extra.(*Struct).pkg = pkg
+ case TINTER:
+ t.Extra.(*Interface).pkg = pkg
+ default:
+ Fatalf("Pkg: unexpected kind: %v", t)
+ }
+}
+
+// Map contains Type fields specific to maps.
+type Map struct {
+ Key *Type // Key type
+ Elem *Type // Val (elem) type
+
+ Bucket *Type // internal struct type representing a hash bucket
+ Hmap *Type // internal struct type representing the Hmap (map header object)
+ Hiter *Type // internal struct type representing hash iterator state
+}
+
+// MapType returns t's extra map-specific fields.
+func (t *Type) MapType() *Map {
+ t.wantEtype(TMAP)
+ return t.Extra.(*Map)
+}
+
+// Forward contains Type fields specific to forward types.
+type Forward struct {
+ Copyto []*Type // where to copy the eventual value to
+ Embedlineno src.XPos // first use of this type as an embedded type
+}
+
+// ForwardType returns t's extra forward-type-specific fields.
+func (t *Type) ForwardType() *Forward {
+ t.wantEtype(TFORW)
+ return t.Extra.(*Forward)
+}
+
+// Func contains Type fields specific to func types.
+type Func struct {
+ Receiver *Type // function receiver
+ Results *Type // function results
+ Params *Type // function params
+
+ Nname *Node
+ pkg *Pkg
+
+ // Argwid is the total width of the function receiver, params, and results.
+ // It gets calculated via a temporary TFUNCARGS type.
+ // Note that TFUNC's Width is Widthptr.
+ Argwid int64
+
+ Outnamed bool
+}
+
+// FuncType returns t's extra func-specific fields.
+func (t *Type) FuncType() *Func {
+ t.wantEtype(TFUNC)
+ return t.Extra.(*Func)
+}
+
+// Struct contains Type fields specific to struct types.
+type Struct struct {
+ fields Fields
+ pkg *Pkg
+
+ // Maps have three associated internal structs (see struct Map).
+ // Map links such structs back to their map type.
+ Map *Type
+
+ Funarg Funarg // type of function arguments for arg struct
+}
+
+// Funarg records the kind of function argument struct: receiver, parameters, or results.
+type Funarg uint8
+
+const (
+ FunargNone Funarg = iota
+ FunargRcvr // receiver
+ FunargParams // input parameters
+ FunargResults // output results
+)
+
+// StructType returns t's extra struct-specific fields.
+func (t *Type) StructType() *Struct {
+ t.wantEtype(TSTRUCT)
+ return t.Extra.(*Struct)
+}
+
+// Interface contains Type fields specific to interface types.
+type Interface struct {
+ Fields Fields
+ pkg *Pkg
+}
+
+// Ptr contains Type fields specific to pointer types.
+type Ptr struct {
+ Elem *Type // element type
+}
+
+// ChanArgs contains Type fields specific to TCHANARGS types.
+type ChanArgs struct {
+ T *Type // reference to a chan type whose elements need a width check
+}
+
+// FuncArgs contains Type fields specific to TFUNCARGS types.
+type FuncArgs struct {
+ T *Type // reference to a func type whose elements need a width check
+}
+
+// Chan contains Type fields specific to channel types.
+type Chan struct {
+ Elem *Type // element type
+ Dir ChanDir // channel direction
+}
+
+// ChanType returns t's extra channel-specific fields.
+func (t *Type) ChanType() *Chan {
+ t.wantEtype(TCHAN)
+ return t.Extra.(*Chan)
+}
+
+type Tuple struct {
+ first *Type
+ second *Type
+ // Any tuple with a memory type must put that memory type second.
+}
+
+// Results are the output from calls that will be late-expanded.
+type Results struct {
+ Types []*Type // Last element is memory output from call.
+}
+
+// Array contains Type fields specific to array types.
+type Array struct {
+ Elem *Type // element type
+ Bound int64 // number of elements; <0 if unknown yet
+}
+
+// Slice contains Type fields specific to slice types.
+type Slice struct {
+ Elem *Type // element type
+}
+
+// A Field represents a field in a struct or a method in an interface or
+// associated with a named type.
+type Field struct {
+ flags bitset8
+
+ Embedded uint8 // embedded field
+
+ Pos src.XPos
+ Sym *Sym
+ Type *Type // field type
+ Note string // literal string annotation
+
+ // For fields that represent function parameters, Nname points
+ // to the associated ONAME Node.
+ Nname *Node
+
+ // Offset in bytes of this field or method within its enclosing struct
+ // or interface Type.
+ Offset int64
+}
+
+const (
+ fieldIsDDD = 1 << iota // field is ... argument
+ fieldBroke // broken field definition
+ fieldNointerface
+)
+
+func (f *Field) IsDDD() bool { return f.flags&fieldIsDDD != 0 }
+func (f *Field) Broke() bool { return f.flags&fieldBroke != 0 }
+func (f *Field) Nointerface() bool { return f.flags&fieldNointerface != 0 }
+
+func (f *Field) SetIsDDD(b bool) { f.flags.set(fieldIsDDD, b) }
+func (f *Field) SetBroke(b bool) { f.flags.set(fieldBroke, b) }
+func (f *Field) SetNointerface(b bool) { f.flags.set(fieldNointerface, b) }
+
+// End returns the offset of the first byte immediately after this field.
+func (f *Field) End() int64 {
+ return f.Offset + f.Type.Width
+}
+
+// IsMethod reports whether f represents a method rather than a struct field.
+func (f *Field) IsMethod() bool {
+ return f.Type.Etype == TFUNC && f.Type.Recv() != nil
+}
+
+// Fields is a pointer to a slice of *Field.
+// This saves space in Types that do not have fields or methods
+// compared to a simple slice of *Field.
+type Fields struct {
+ s *[]*Field
+}
+
+// Len returns the number of entries in f.
+func (f *Fields) Len() int {
+ if f.s == nil {
+ return 0
+ }
+ return len(*f.s)
+}
+
+// Slice returns the entries in f as a slice.
+// Changes to the slice entries will be reflected in f.
+func (f *Fields) Slice() []*Field {
+ if f.s == nil {
+ return nil
+ }
+ return *f.s
+}
+
+// Index returns the i'th element of Fields.
+// It panics if f does not have at least i+1 elements.
+func (f *Fields) Index(i int) *Field {
+ return (*f.s)[i]
+}
+
+// Set sets f to a slice.
+// This takes ownership of the slice.
+func (f *Fields) Set(s []*Field) {
+ if len(s) == 0 {
+ f.s = nil
+ } else {
+ // Copy s into t and take the address of t rather than of the
+ // parameter s, so that s does not escape (and allocate) when
+ // len(s) == 0.
+ t := s
+ f.s = &t
+ }
+}
+
+// Append appends entries to f.
+func (f *Fields) Append(s ...*Field) {
+ if f.s == nil {
+ f.s = new([]*Field)
+ }
+ *f.s = append(*f.s, s...)
+}
+
+// New returns a new Type of the specified kind.
+func New(et EType) *Type {
+ t := &Type{
+ Etype: et,
+ Width: BADWIDTH,
+ }
+ t.Orig = t
+ // TODO(josharian): lazily initialize some of these?
+ switch t.Etype {
+ case TMAP:
+ t.Extra = new(Map)
+ case TFORW:
+ t.Extra = new(Forward)
+ case TFUNC:
+ t.Extra = new(Func)
+ case TSTRUCT:
+ t.Extra = new(Struct)
+ case TINTER:
+ t.Extra = new(Interface)
+ case TPTR:
+ t.Extra = Ptr{}
+ case TCHANARGS:
+ t.Extra = ChanArgs{}
+ case TFUNCARGS:
+ t.Extra = FuncArgs{}
+ case TCHAN:
+ t.Extra = new(Chan)
+ case TTUPLE:
+ t.Extra = new(Tuple)
+ case TRESULTS:
+ t.Extra = new(Results)
+ }
+ return t
+}
+
+// NewArray returns a new fixed-length array Type.
+func NewArray(elem *Type, bound int64) *Type {
+ if bound < 0 {
+ Fatalf("NewArray: invalid bound %v", bound)
+ }
+ t := New(TARRAY)
+ t.Extra = &Array{Elem: elem, Bound: bound}
+ t.SetNotInHeap(elem.NotInHeap())
+ return t
+}
+
+// NewSlice returns the slice Type with element type elem.
+func NewSlice(elem *Type) *Type {
+ if t := elem.Cache.slice; t != nil {
+ if t.Elem() != elem {
+ Fatalf("elem mismatch")
+ }
+ return t
+ }
+
+ t := New(TSLICE)
+ t.Extra = Slice{Elem: elem}
+ elem.Cache.slice = t
+ return t
+}
+
+// NewChan returns a new chan Type with direction dir.
+func NewChan(elem *Type, dir ChanDir) *Type {
+ t := New(TCHAN)
+ ct := t.ChanType()
+ ct.Elem = elem
+ ct.Dir = dir
+ return t
+}
+
+func NewTuple(t1, t2 *Type) *Type {
+ t := New(TTUPLE)
+ t.Extra.(*Tuple).first = t1
+ t.Extra.(*Tuple).second = t2
+ return t
+}
+
+func NewResults(types []*Type) *Type {
+ t := New(TRESULTS)
+ t.Extra.(*Results).Types = types
+ return t
+}
+
+func newSSA(name string) *Type {
+ t := New(TSSA)
+ t.Extra = name
+ return t
+}
+
+// NewMap returns a new map Type with key type k and element (aka value) type v.
+func NewMap(k, v *Type) *Type {
+ t := New(TMAP)
+ mt := t.MapType()
+ mt.Key = k
+ mt.Elem = v
+ return t
+}
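+
+// A construction sketch (assuming the frontend has populated Types):
+//
+//	arr := NewArray(Types[TINT], 4)  // [4]int
+//	m := NewMap(Types[TSTRING], arr) // map[string][4]int
+//	ch := NewChan(m, Cboth)          // chan map[string][4]int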
+
+// NewPtrCacheEnabled controls whether *T Types are cached in T.
+// Caching is disabled just before starting the backend.
+// This allows the backend to run concurrently.
+var NewPtrCacheEnabled = true
+
+// NewPtr returns the pointer type pointing to t.
+func NewPtr(elem *Type) *Type {
+ if elem == nil {
+ Fatalf("NewPtr: pointer to elem Type is nil")
+ }
+
+ if t := elem.Cache.ptr; t != nil {
+ if t.Elem() != elem {
+ Fatalf("NewPtr: elem mismatch")
+ }
+ return t
+ }
+
+ t := New(TPTR)
+ t.Extra = Ptr{Elem: elem}
+ t.Width = int64(Widthptr)
+ t.Align = uint8(Widthptr)
+ if NewPtrCacheEnabled {
+ elem.Cache.ptr = t
+ }
+ return t
+}
+
+// NewChanArgs returns a new TCHANARGS type for channel type c.
+func NewChanArgs(c *Type) *Type {
+ t := New(TCHANARGS)
+ t.Extra = ChanArgs{T: c}
+ return t
+}
+
+// NewFuncArgs returns a new TFUNCARGS type for func type f.
+func NewFuncArgs(f *Type) *Type {
+ t := New(TFUNCARGS)
+ t.Extra = FuncArgs{T: f}
+ return t
+}
+
+func NewField() *Field {
+ return &Field{
+ Offset: BADWIDTH,
+ }
+}
+
+// SubstAny walks t, replacing instances of "any" with successive
+// elements removed from types. It returns the substituted type.
+func SubstAny(t *Type, types *[]*Type) *Type {
+ if t == nil {
+ return nil
+ }
+
+ switch t.Etype {
+ default:
+ // Leave the type unchanged.
+
+ case TANY:
+ if len(*types) == 0 {
+ Fatalf("substArgTypes: not enough argument types")
+ }
+ t = (*types)[0]
+ *types = (*types)[1:]
+
+ case TPTR:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.Extra = Ptr{Elem: elem}
+ }
+
+ case TARRAY:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.Extra.(*Array).Elem = elem
+ }
+
+ case TSLICE:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.Extra = Slice{Elem: elem}
+ }
+
+ case TCHAN:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.Extra.(*Chan).Elem = elem
+ }
+
+ case TMAP:
+ key := SubstAny(t.Key(), types)
+ elem := SubstAny(t.Elem(), types)
+ if key != t.Key() || elem != t.Elem() {
+ t = t.copy()
+ t.Extra.(*Map).Key = key
+ t.Extra.(*Map).Elem = elem
+ }
+
+ case TFUNC:
+ recvs := SubstAny(t.Recvs(), types)
+ params := SubstAny(t.Params(), types)
+ results := SubstAny(t.Results(), types)
+ if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
+ t = t.copy()
+ t.FuncType().Receiver = recvs
+ t.FuncType().Results = results
+ t.FuncType().Params = params
+ }
+
+ case TSTRUCT:
+ // Make a copy of all fields, including ones whose type does not change.
+ // This prevents aliasing across functions, which can lead to later
+ // fields getting their Offset incorrectly overwritten.
+ fields := t.FieldSlice()
+ nfs := make([]*Field, len(fields))
+ for i, f := range fields {
+ nft := SubstAny(f.Type, types)
+ nfs[i] = f.Copy()
+ nfs[i].Type = nft
+ }
+ t = t.copy()
+ t.SetFields(nfs)
+ }
+
+ return t
+}
+
+// copy returns a shallow copy of the Type.
+func (t *Type) copy() *Type {
+ if t == nil {
+ return nil
+ }
+ nt := *t
+ // copy any *T Extra fields, to avoid aliasing
+ switch t.Etype {
+ case TMAP:
+ x := *t.Extra.(*Map)
+ nt.Extra = &x
+ case TFORW:
+ x := *t.Extra.(*Forward)
+ nt.Extra = &x
+ case TFUNC:
+ x := *t.Extra.(*Func)
+ nt.Extra = &x
+ case TSTRUCT:
+ x := *t.Extra.(*Struct)
+ nt.Extra = &x
+ case TINTER:
+ x := *t.Extra.(*Interface)
+ nt.Extra = &x
+ case TCHAN:
+ x := *t.Extra.(*Chan)
+ nt.Extra = &x
+ case TARRAY:
+ x := *t.Extra.(*Array)
+ nt.Extra = &x
+ case TTUPLE, TSSA, TRESULTS:
+ Fatalf("ssa types cannot be copied")
+ }
+ // TODO(mdempsky): Find out why this is necessary and explain.
+ if t.Orig == t {
+ nt.Orig = &nt
+ }
+ return &nt
+}
+
+func (f *Field) Copy() *Field {
+ nf := *f
+ return &nf
+}
+
+func (t *Type) wantEtype(et EType) {
+ if t.Etype != et {
+ Fatalf("want %v, but have %v", et, t)
+ }
+}
+
+func (t *Type) Recvs() *Type { return t.FuncType().Receiver }
+func (t *Type) Params() *Type { return t.FuncType().Params }
+func (t *Type) Results() *Type { return t.FuncType().Results }
+
+func (t *Type) NumRecvs() int { return t.FuncType().Receiver.NumFields() }
+func (t *Type) NumParams() int { return t.FuncType().Params.NumFields() }
+func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() }
+
+// IsVariadic reports whether function type t is variadic.
+func (t *Type) IsVariadic() bool {
+ n := t.NumParams()
+ return n > 0 && t.Params().Field(n-1).IsDDD()
+}
+
+// Recv returns the receiver of function type t, if any.
+func (t *Type) Recv() *Field {
+ s := t.Recvs()
+ if s.NumFields() == 0 {
+ return nil
+ }
+ return s.Field(0)
+}
+
+// RecvsParamsResults stores the accessor functions for a function Type's
+// receiver, parameters, and result parameters, in that order.
+// It can be used to iterate over all of a function's parameter lists.
+var RecvsParamsResults = [3]func(*Type) *Type{
+ (*Type).Recvs, (*Type).Params, (*Type).Results,
+}
+
+// RecvsParams is like RecvsParamsResults, but omits result parameters.
+var RecvsParams = [2]func(*Type) *Type{
+ (*Type).Recvs, (*Type).Params,
+}
+
+// ParamsResults is like RecvsParamsResults, but omits receiver parameters.
+var ParamsResults = [2]func(*Type) *Type{
+ (*Type).Params, (*Type).Results,
+}
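+
+// For example, to walk every receiver, parameter, and result field of a
+// function type t (a sketch):
+//
+//	for _, f := range RecvsParamsResults {
+//		for _, field := range f(t).FieldSlice() {
+//			_ = field // inspect each field in declaration order
+//		}
+//	}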
+
+// Key returns the key type of map type t.
+func (t *Type) Key() *Type {
+ t.wantEtype(TMAP)
+ return t.Extra.(*Map).Key
+}
+
+// Elem returns the type of elements of t.
+// Usable with pointers, channels, arrays, slices, and maps.
+func (t *Type) Elem() *Type {
+ switch t.Etype {
+ case TPTR:
+ return t.Extra.(Ptr).Elem
+ case TARRAY:
+ return t.Extra.(*Array).Elem
+ case TSLICE:
+ return t.Extra.(Slice).Elem
+ case TCHAN:
+ return t.Extra.(*Chan).Elem
+ case TMAP:
+ return t.Extra.(*Map).Elem
+ }
+ Fatalf("Type.Elem %s", t.Etype)
+ return nil
+}
+
+// ChanArgs returns the channel type for TCHANARGS type t.
+func (t *Type) ChanArgs() *Type {
+ t.wantEtype(TCHANARGS)
+ return t.Extra.(ChanArgs).T
+}
+
+// FuncArgs returns the func type for TFUNCARGS type t.
+func (t *Type) FuncArgs() *Type {
+ t.wantEtype(TFUNCARGS)
+ return t.Extra.(FuncArgs).T
+}
+
+// Nname returns the associated function's nname.
+func (t *Type) Nname() *Node {
+ switch t.Etype {
+ case TFUNC:
+ return t.Extra.(*Func).Nname
+ }
+ Fatalf("Type.Nname %v %v", t.Etype, t)
+ return nil
+}
+
+// SetNname sets the associated function's nname.
+func (t *Type) SetNname(n *Node) {
+ switch t.Etype {
+ case TFUNC:
+ t.Extra.(*Func).Nname = n
+ default:
+ Fatalf("Type.SetNname %v %v", t.Etype, t)
+ }
+}
+
+// IsFuncArgStruct reports whether t is a struct representing function parameters.
+func (t *Type) IsFuncArgStruct() bool {
+ return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
+}
+
+func (t *Type) Methods() *Fields {
+ // TODO(mdempsky): Validate t?
+ return &t.methods
+}
+
+func (t *Type) AllMethods() *Fields {
+ // TODO(mdempsky): Validate t?
+ return &t.allMethods
+}
+
+func (t *Type) Fields() *Fields {
+ switch t.Etype {
+ case TSTRUCT:
+ return &t.Extra.(*Struct).fields
+ case TINTER:
+ Dowidth(t)
+ return &t.Extra.(*Interface).Fields
+ }
+ Fatalf("Fields: type %v does not have fields", t)
+ return nil
+}
+
+// Field returns the i'th field/method of struct/interface type t.
+func (t *Type) Field(i int) *Field {
+ return t.Fields().Slice()[i]
+}
+
+// FieldSlice returns a slice containing all fields/methods of
+// struct/interface type t.
+func (t *Type) FieldSlice() []*Field {
+ return t.Fields().Slice()
+}
+
+// SetFields sets struct/interface type t's fields/methods to fields.
+func (t *Type) SetFields(fields []*Field) {
+ // If we've calculated the width of t before,
+ // then some other type such as a function signature
+ // might now have the wrong type.
+ // Rather than try to track and invalidate those,
+ // enforce that SetFields cannot be called once
+ // t's width has been calculated.
+ if t.WidthCalculated() {
+ Fatalf("SetFields of %v: width previously calculated", t)
+ }
+ t.wantEtype(TSTRUCT)
+ for _, f := range fields {
+ // If type T contains a field F with a go:notinheap
+ // type, then T must also be go:notinheap. Otherwise,
+ // you could heap allocate T and then get a pointer F,
+ // which would be a heap pointer to a go:notinheap
+ // type.
+ if f.Type != nil && f.Type.NotInHeap() {
+ t.SetNotInHeap(true)
+ break
+ }
+ }
+ t.Fields().Set(fields)
+}
+
+func (t *Type) SetInterface(methods []*Field) {
+ t.wantEtype(TINTER)
+ t.Methods().Set(methods)
+}
+
+func (t *Type) WidthCalculated() bool {
+ return t.Align > 0
+}
+
+// ArgWidth returns the total aligned argument size for a function.
+// It includes the receiver, parameters, and results.
+func (t *Type) ArgWidth() int64 {
+ t.wantEtype(TFUNC)
+ return t.Extra.(*Func).Argwid
+}
+
+func (t *Type) Size() int64 {
+ if t.Etype == TSSA {
+ if t == TypeInt128 {
+ return 16
+ }
+ return 0
+ }
+ Dowidth(t)
+ return t.Width
+}
+
+func (t *Type) Alignment() int64 {
+ Dowidth(t)
+ return int64(t.Align)
+}
+
+func (t *Type) SimpleString() string {
+ return t.Etype.String()
+}
+
+// Cmp is a comparison between values a and b.
+// -1 if a < b
+// 0 if a == b
+// 1 if a > b
+type Cmp int8
+
+const (
+ CMPlt = Cmp(-1)
+ CMPeq = Cmp(0)
+ CMPgt = Cmp(1)
+)
+
+// Compare compares types for purposes of the SSA back
+// end, returning a Cmp (one of CMPlt, CMPeq, CMPgt).
+// The answers are correct for an optimizer
+// or code generator, but not necessarily typechecking.
+// The order chosen is arbitrary, only consistency and division
+// into equivalence classes (Types that compare CMPeq) matters.
+func (t *Type) Compare(x *Type) Cmp {
+ if x == t {
+ return CMPeq
+ }
+ return t.cmp(x)
+}
+
+func cmpForNe(x bool) Cmp {
+ if x {
+ return CMPlt
+ }
+ return CMPgt
+}
+
+func (r *Sym) cmpsym(s *Sym) Cmp {
+ if r == s {
+ return CMPeq
+ }
+ if r == nil {
+ return CMPlt
+ }
+ if s == nil {
+ return CMPgt
+ }
+ // Fast sort, not pretty sort
+ if len(r.Name) != len(s.Name) {
+ return cmpForNe(len(r.Name) < len(s.Name))
+ }
+ if r.Pkg != s.Pkg {
+ if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) {
+ return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix))
+ }
+ if r.Pkg.Prefix != s.Pkg.Prefix {
+ return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix)
+ }
+ }
+ if r.Name != s.Name {
+ return cmpForNe(r.Name < s.Name)
+ }
+ return CMPeq
+}
+
+// cmp compares two *Types t and x, returning CMPlt,
+// CMPeq, CMPgt as t<x, t==x, t>x, for an arbitrary
+// and optimizer-centric notion of comparison.
+// TODO(josharian): make this safe for recursive interface types
+// and use in signatlist sorting. See issue 19869.
+func (t *Type) cmp(x *Type) Cmp {
+ // This follows the structure of function identical in identity.go
+ // with two exceptions.
+ // 1. Symbols are compared more carefully because a <,=,> result is desired.
+ // 2. Maps are treated specially to avoid endless recursion -- maps
+ // contain an internal data type not expressible in Go source code.
+ if t == x {
+ return CMPeq
+ }
+ if t == nil {
+ return CMPlt
+ }
+ if x == nil {
+ return CMPgt
+ }
+
+ if t.Etype != x.Etype {
+ return cmpForNe(t.Etype < x.Etype)
+ }
+
+ if t.Sym != nil || x.Sym != nil {
+ // Special case: we keep byte and uint8 separate
+ // for error messages. Treat them as equal.
+ switch t.Etype {
+ case TUINT8:
+ if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
+ return CMPeq
+ }
+
+ case TINT32:
+ if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
+ return CMPeq
+ }
+ }
+ }
+
+ if c := t.Sym.cmpsym(x.Sym); c != CMPeq {
+ return c
+ }
+
+ if x.Sym != nil {
+ // Syms non-nil, if vargens match then equal.
+ if t.Vargen != x.Vargen {
+ return cmpForNe(t.Vargen < x.Vargen)
+ }
+ return CMPeq
+ }
+ // both syms nil, look at structure below.
+
+ switch t.Etype {
+ case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
+ TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
+ return CMPeq
+
+ case TSSA:
+ tname := t.Extra.(string)
+ xname := x.Extra.(string)
+ // desire fast sorting, not pretty sorting.
+ if len(tname) == len(xname) {
+ if tname == xname {
+ return CMPeq
+ }
+ if tname < xname {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ if len(tname) > len(xname) {
+ return CMPgt
+ }
+ return CMPlt
+
+ case TTUPLE:
+ xtup := x.Extra.(*Tuple)
+ ttup := t.Extra.(*Tuple)
+ if c := ttup.first.Compare(xtup.first); c != CMPeq {
+ return c
+ }
+ return ttup.second.Compare(xtup.second)
+
+ case TRESULTS:
+ xResults := x.Extra.(*Results)
+ tResults := t.Extra.(*Results)
+ xl, tl := len(xResults.Types), len(tResults.Types)
+ if tl != xl {
+ if tl < xl {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ for i := 0; i < tl; i++ {
+ if c := tResults.Types[i].Compare(xResults.Types[i]); c != CMPeq {
+ return c
+ }
+ }
+ return CMPeq
+
+ case TMAP:
+ if c := t.Key().cmp(x.Key()); c != CMPeq {
+ return c
+ }
+ return t.Elem().cmp(x.Elem())
+
+ case TPTR, TSLICE:
+ // No special cases for these, they are handled
+ // by the general code after the switch.
+
+ case TSTRUCT:
+ if t.StructType().Map == nil {
+ if x.StructType().Map != nil {
+ return CMPlt // nil < non-nil
+ }
+ // both Maps are nil; fall through to the general field comparison below
+ } else if x.StructType().Map == nil {
+ return CMPgt // nil > non-nil
+ } else if t.StructType().Map.MapType().Bucket == t {
+ // Both have non-nil Map
+ // Special case for Maps which include a recursive type where the recursion is not broken with a named type
+ if x.StructType().Map.MapType().Bucket != x {
+ return CMPlt // bucket maps are least
+ }
+ return t.StructType().Map.cmp(x.StructType().Map)
+ } else if x.StructType().Map.MapType().Bucket == x {
+ return CMPgt // bucket maps are least
+ } // If t != t.Map.Bucket, fall through to general case
+
+ tfs := t.FieldSlice()
+ xfs := x.FieldSlice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if t1.Embedded != x1.Embedded {
+ return cmpForNe(t1.Embedded < x1.Embedded)
+ }
+ if t1.Note != x1.Note {
+ return cmpForNe(t1.Note < x1.Note)
+ }
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TINTER:
+ tfs := t.FieldSlice()
+ xfs := x.FieldSlice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TFUNC:
+ for _, f := range RecvsParamsResults {
+ // Loop over fields in structs, ignoring argument names.
+ tfs := f(t).FieldSlice()
+ xfs := f(x).FieldSlice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ ta := tfs[i]
+ tb := xfs[i]
+ if ta.IsDDD() != tb.IsDDD() {
+ return cmpForNe(!ta.IsDDD())
+ }
+ if c := ta.Type.cmp(tb.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ }
+ return CMPeq
+
+ case TARRAY:
+ if t.NumElem() != x.NumElem() {
+ return cmpForNe(t.NumElem() < x.NumElem())
+ }
+
+ case TCHAN:
+ if t.ChanDir() != x.ChanDir() {
+ return cmpForNe(t.ChanDir() < x.ChanDir())
+ }
+
+ default:
+ e := fmt.Sprintf("Do not know how to compare %v with %v", t, x)
+ panic(e)
+ }
+
+ // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE.
+ return t.Elem().cmp(x.Elem())
+}
+
+// IsKind reports whether t is a Type of the specified kind.
+func (t *Type) IsKind(et EType) bool {
+ return t != nil && t.Etype == et
+}
+
+func (t *Type) IsBoolean() bool {
+ return t.Etype == TBOOL
+}
+
+var unsignedEType = [...]EType{
+ TINT8: TUINT8,
+ TUINT8: TUINT8,
+ TINT16: TUINT16,
+ TUINT16: TUINT16,
+ TINT32: TUINT32,
+ TUINT32: TUINT32,
+ TINT64: TUINT64,
+ TUINT64: TUINT64,
+ TINT: TUINT,
+ TUINT: TUINT,
+ TUINTPTR: TUINTPTR,
+}
+
+// ToUnsigned returns the unsigned equivalent of integer type t.
+func (t *Type) ToUnsigned() *Type {
+ if !t.IsInteger() {
+ Fatalf("unsignedType(%v)", t)
+ }
+ return Types[unsignedEType[t.Etype]]
+}
+
+func (t *Type) IsInteger() bool {
+ switch t.Etype {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsSigned() bool {
+ switch t.Etype {
+ case TINT8, TINT16, TINT32, TINT64, TINT:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsFloat() bool {
+ return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
+}
+
+func (t *Type) IsComplex() bool {
+ return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128
+}
+
+// IsPtr reports whether t is a regular Go pointer type.
+// This does not include unsafe.Pointer.
+func (t *Type) IsPtr() bool {
+ return t.Etype == TPTR
+}
+
+// IsPtrElem reports whether t is the element of a pointer (to t).
+func (t *Type) IsPtrElem() bool {
+ return t.Cache.ptr != nil
+}
+
+// IsUnsafePtr reports whether t is an unsafe pointer.
+func (t *Type) IsUnsafePtr() bool {
+ return t.Etype == TUNSAFEPTR
+}
+
+// IsUintptr reports whether t is an uintptr.
+func (t *Type) IsUintptr() bool {
+ return t.Etype == TUINTPTR
+}
+
+// IsPtrShaped reports whether t is represented by a single machine pointer.
+// In addition to regular Go pointer types, this includes map, channel, and
+// function types and unsafe.Pointer. It does not include array or struct types
+// that consist of a single pointer-shaped type.
+// TODO(mdempsky): Should it? See golang.org/issue/15028.
+func (t *Type) IsPtrShaped() bool {
+ return t.Etype == TPTR || t.Etype == TUNSAFEPTR ||
+ t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
+}
+
+// HasNil reports whether the set of values determined by t includes nil.
+func (t *Type) HasNil() bool {
+ switch t.Etype {
+ case TCHAN, TFUNC, TINTER, TMAP, TPTR, TSLICE, TUNSAFEPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsString() bool {
+ return t.Etype == TSTRING
+}
+
+func (t *Type) IsMap() bool {
+ return t.Etype == TMAP
+}
+
+func (t *Type) IsChan() bool {
+ return t.Etype == TCHAN
+}
+
+func (t *Type) IsSlice() bool {
+ return t.Etype == TSLICE
+}
+
+func (t *Type) IsArray() bool {
+ return t.Etype == TARRAY
+}
+
+func (t *Type) IsStruct() bool {
+ return t.Etype == TSTRUCT
+}
+
+func (t *Type) IsInterface() bool {
+ return t.Etype == TINTER
+}
+
+// IsEmptyInterface reports whether t is an empty interface type.
+func (t *Type) IsEmptyInterface() bool {
+ return t.IsInterface() && t.NumFields() == 0
+}
+
+func (t *Type) PtrTo() *Type {
+ return NewPtr(t)
+}
+
+func (t *Type) NumFields() int {
+ return t.Fields().Len()
+}
+func (t *Type) FieldType(i int) *Type {
+ if t.Etype == TTUPLE {
+ switch i {
+ case 0:
+ return t.Extra.(*Tuple).first
+ case 1:
+ return t.Extra.(*Tuple).second
+ default:
+ panic("bad tuple index")
+ }
+ }
+ if t.Etype == TRESULTS {
+ return t.Extra.(*Results).Types[i]
+ }
+ return t.Field(i).Type
+}
+func (t *Type) FieldOff(i int) int64 {
+ return t.Field(i).Offset
+}
+func (t *Type) FieldName(i int) string {
+ return t.Field(i).Sym.Name
+}
+
+func (t *Type) NumElem() int64 {
+ t.wantEtype(TARRAY)
+ return t.Extra.(*Array).Bound
+}
+
+type componentsIncludeBlankFields bool
+
+const (
+ IgnoreBlankFields componentsIncludeBlankFields = false
+ CountBlankFields componentsIncludeBlankFields = true
+)
+
+// NumComponents returns the number of primitive elements that compose t.
+// Struct and array types are flattened for the purpose of counting.
+// All other types (including string, slice, and interface types) count as one element.
+// If countBlank is IgnoreBlankFields, then blank struct fields
+// (and their comprised elements) are excluded from the count.
+// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
+func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
+ switch t.Etype {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ Fatalf("NumComponents func arg struct")
+ }
+ var n int64
+ for _, f := range t.FieldSlice() {
+ if countBlank == IgnoreBlankFields && f.Sym.IsBlank() {
+ continue
+ }
+ n += f.Type.NumComponents(countBlank)
+ }
+ return n
+ case TARRAY:
+ return t.NumElem() * t.Elem().NumComponents(countBlank)
+ }
+ return 1
+}
+
+// SoleComponent returns the only primitive component in t,
+// if there is exactly one. Otherwise, it returns nil.
+// Components are counted as in NumComponents, including blank fields.
+func (t *Type) SoleComponent() *Type {
+ switch t.Etype {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ Fatalf("SoleComponent func arg struct")
+ }
+ if t.NumFields() != 1 {
+ return nil
+ }
+ return t.Field(0).Type.SoleComponent()
+ case TARRAY:
+ if t.NumElem() != 1 {
+ return nil
+ }
+ return t.Elem().SoleComponent()
+ }
+ return t
+}
+
+// ChanDir returns the direction of a channel type t.
+// The direction will be one of Crecv, Csend, or Cboth.
+func (t *Type) ChanDir() ChanDir {
+ t.wantEtype(TCHAN)
+ return t.Extra.(*Chan).Dir
+}
+
+func (t *Type) IsMemory() bool {
+ if t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
+ return true
+ }
+ if t.Etype == TRESULTS {
+ if types := t.Extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
+ return true
+ }
+ }
+ return false
+}
+func (t *Type) IsFlags() bool { return t == TypeFlags }
+func (t *Type) IsVoid() bool { return t == TypeVoid }
+func (t *Type) IsTuple() bool { return t.Etype == TTUPLE }
+func (t *Type) IsResults() bool { return t.Etype == TRESULTS }
+
+// IsUntyped reports whether t is an untyped type.
+func (t *Type) IsUntyped() bool {
+ if t == nil {
+ return false
+ }
+ if t == UntypedString || t == UntypedBool {
+ return true
+ }
+ switch t.Etype {
+ case TNIL, TIDEAL:
+ return true
+ }
+ return false
+}
+
+// HasPointers reports whether t contains a heap pointer.
+// Note that this function ignores pointers to go:notinheap types.
+func (t *Type) HasPointers() bool {
+ switch t.Etype {
+ case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
+ TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
+ return false
+
+ case TARRAY:
+ if t.NumElem() == 0 { // empty array has no pointers
+ return false
+ }
+ return t.Elem().HasPointers()
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if t1.Type.HasPointers() {
+ return true
+ }
+ }
+ return false
+
+ case TPTR, TSLICE:
+ return !t.Elem().NotInHeap()
+
+ case TTUPLE:
+ ttup := t.Extra.(*Tuple)
+ return ttup.first.HasPointers() || ttup.second.HasPointers()
+
+ case TRESULTS:
+ types := t.Extra.(*Results).Types
+ for _, et := range types {
+ if et.HasPointers() {
+ return true
+ }
+ }
+ return false
+ }
+
+ return true
+}
+
+func (t *Type) Symbol() *obj.LSym {
+ return TypeLinkSym(t)
+}
+
+// Tie returns 'T' if t is a concrete type,
+// 'I' if t is an interface type, and 'E' if t is an empty interface type.
+// It is used to build calls to the conv* and assert* runtime routines.
+func (t *Type) Tie() byte {
+ if t.IsEmptyInterface() {
+ return 'E'
+ }
+ if t.IsInterface() {
+ return 'I'
+ }
+ return 'T'
+}
+
+var recvType *Type
+
+// FakeRecvType returns the singleton type used for interface method receivers.
+func FakeRecvType() *Type {
+ if recvType == nil {
+ recvType = NewPtr(New(TSTRUCT))
+ }
+ return recvType
+}
+
+var (
+ // TSSA types. HasPointers assumes these are pointer-free.
+ TypeInvalid = newSSA("invalid")
+ TypeMem = newSSA("mem")
+ TypeFlags = newSSA("flags")
+ TypeVoid = newSSA("void")
+ TypeInt128 = newSSA("int128")
+)
diff --git a/src/cmd/compile/internal/types/type_test.go b/src/cmd/compile/internal/types/type_test.go
new file mode 100644
index 0000000..fe3f380
--- /dev/null
+++ b/src/cmd/compile/internal/types/type_test.go
@@ -0,0 +1,28 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestSSACompare(t *testing.T) {
+ a := []*types.Type{
+ types.TypeInvalid,
+ types.TypeMem,
+ types.TypeFlags,
+ types.TypeVoid,
+ types.TypeInt128,
+ }
+ for _, x := range a {
+ for _, y := range a {
+ c := x.Compare(y)
+ if x == y && c != types.CMPeq || x != y && c == types.CMPeq {
+ t.Errorf("%s compare %s == %d\n", x.Extra, y.Extra, c)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
new file mode 100644
index 0000000..e8b1073
--- /dev/null
+++ b/src/cmd/compile/internal/types/utils.go
@@ -0,0 +1,73 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const BADWIDTH = -1000000000
+
+// The following variables must be initialized early by the frontend.
+// They are here to break import cycles.
+// TODO(gri) eliminate these dependencies.
+var (
+ Widthptr int
+ Dowidth func(*Type)
+ Fatalf func(string, ...interface{})
+ Sconv func(*Sym, int, int) string // orig: func sconv(s *Sym, flag FmtFlag, mode fmtMode) string
+ Tconv func(*Type, int, int) string // orig: func tconv(t *Type, flag FmtFlag, mode fmtMode) string
+ FormatSym func(*Sym, fmt.State, rune, int) // orig: func symFormat(sym *Sym, s fmt.State, verb rune, mode fmtMode)
+ FormatType func(*Type, fmt.State, rune, int) // orig: func typeFormat(t *Type, s fmt.State, verb rune, mode fmtMode)
+ TypeLinkSym func(*Type) *obj.LSym
+ Ctxt *obj.Link
+
+ FmtLeft int
+ FmtUnsigned int
+ FErr int
+)
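
A hypothetical miniature (not part of this patch) of the hook pattern above: the low-level package declares function variables and the frontend assigns them early during initialization, so types never has to import gc. The name initHooks below is illustrative only.

package main

import "fmt"

// Fatalf is the hook declared by the low-level package; it stays nil until
// the higher-level package wires it up, mirroring the variables above.
var Fatalf func(format string, args ...interface{})

// initHooks stands in for the early frontend initialization that assigns
// Dowidth, Fatalf, Sconv and friends in the real compiler.
func initHooks() {
    Fatalf = func(format string, args ...interface{}) {
        panic(fmt.Sprintf(format, args...))
    }
}

func main() {
    initHooks()
    defer func() { fmt.Println("recovered:", recover()) }()
    Fatalf("width of %v not computed", "T") // calls through the injected hook
}
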
+
+func (s *Sym) String() string {
+ return Sconv(s, 0, FErr)
+}
+
+func (sym *Sym) Format(s fmt.State, verb rune) {
+ FormatSym(sym, s, verb, FErr)
+}
+
+func (t *Type) String() string {
+ // The implementation of tconv (including typefmt and fldconv)
+ // must handle recursive types correctly.
+ return Tconv(t, 0, FErr)
+}
+
+// ShortString generates a short description of t.
+// It is used in autogenerated method names, reflection,
+// and itab names.
+func (t *Type) ShortString() string {
+ return Tconv(t, FmtLeft, FErr)
+}
+
+// LongString generates a complete description of t.
+// It is useful for reflection,
+// or when a unique fingerprint or hash of a type is required.
+func (t *Type) LongString() string {
+ return Tconv(t, FmtLeft|FmtUnsigned, FErr)
+}
+
+func (t *Type) Format(s fmt.State, verb rune) {
+ FormatType(t, s, verb, FErr)
+}
+
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
new file mode 100644
index 0000000..9c9f6ed
--- /dev/null
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -0,0 +1,505 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/wasm"
+ "cmd/internal/objabi"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &wasm.Linkwasm
+ arch.REGSP = wasm.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zeroRange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
+
+func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt%8 != 0 {
+ gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ }
+
+ for i := int64(0); i < cnt; i += 8 {
+ p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+ p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+ p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+ }
+
+ return p
+}
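
A small sketch (not part of this patch) of the frame offsets the loop above stores to: each iteration emits Get SP, I64Const $0, I64Store off+i, so for example off=8, cnt=24 zeroes the three 8-byte words at offsets 8, 16 and 24. The cnt%8 check above guarantees the stride divides evenly.

package main

import "fmt"

func main() {
    off, cnt := int64(8), int64(24) // example values; cnt must be a multiple of 8
    for i := int64(0); i < cnt; i += 8 {
        fmt.Println(off + i) // one I64Store of constant 0 per word: 8, 16, 24
    }
}
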
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ return pp.Prog(wasm.ANop)
+}
+
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ case ssa.BlockIf:
+ switch next {
+ case b.Succs[0].Block():
+ // if false, jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ case b.Succs[1].Block():
+ // if true, jump to b.Succs[0]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ default:
+ // if true, jump to b.Succs[0], else jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ }
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.ARET)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.BlockExit:
+
+ case ssa.BlockDefer:
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ default:
+ panic("unexpected block")
+ }
+
+ // Entry point for the next block. Used by the JMP in goToBlock.
+ s.Prog(wasm.ARESUMEPOINT)
+
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
+ s.PrepareCall(v)
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == gc.Deferreturn {
+ // add a resume point before call to deferreturn so it can be called again via jmpdefer
+ s.Prog(wasm.ARESUMEPOINT)
+ }
+ if v.Op == ssa.OpWasmLoweredClosureCall {
+ getValue64(s, v.Args[1])
+ setReg(s, wasm.REG_CTXT)
+ }
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
+ sym := call.Fn
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
+ p.Pos = v.Pos
+ } else {
+ getValue64(s, v.Args[0])
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_NONE}
+ p.Pos = v.Pos
+ }
+
+ case ssa.OpWasmLoweredMove:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ i32Const(s, int32(v.AuxInt))
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmMove}
+
+ case ssa.OpWasmLoweredZero:
+ getValue32(s, v.Args[0])
+ i32Const(s, int32(v.AuxInt))
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmZero}
+
+ case ssa.OpWasmLoweredNilCheck:
+ getValue64(s, v.Args[0])
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AIf)
+ p := s.Prog(wasm.ACALLNORESUME)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.SigPanic}
+ s.Prog(wasm.AEnd)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpWasmLoweredWB:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ p := s.Prog(wasm.ACALLNORESUME) // TODO(neelance): If possible, turn this into a simple wasm.ACall.
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: v.Aux.(*obj.LSym)}
+
+ case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
+ getValue32(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpStoreReg:
+ getReg(s, wasm.REG_SP)
+ getValue64(s, v.Args[0])
+ p := s.Prog(storeOp(v.Type))
+ gc.AddrAuto(&p.To, v)
+
+ default:
+ if v.Type.IsMemory() {
+ return
+ }
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped++
+ // If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
+ // Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
+ return
+ }
+ ssaGenValueOnStack(s, v, true)
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+ setReg(s, v.Reg())
+ }
+}
+
+func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
+ switch v.Op {
+ case ssa.OpWasmLoweredGetClosurePtr:
+ getReg(s, wasm.REG_CTXT)
+
+ case ssa.OpWasmLoweredGetCallerPC:
+ p := s.Prog(wasm.AI64Load)
+ // Caller PC is stored 8 bytes below first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_MEM,
+ Name: obj.NAME_PARAM,
+ Offset: -8,
+ }
+
+ case ssa.OpWasmLoweredGetCallerSP:
+ p := s.Prog(wasm.AGet)
+ // Caller SP is the address of the first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_ADDR,
+ Name: obj.NAME_PARAM,
+ Reg: wasm.REG_SP,
+ Offset: 0,
+ }
+
+ case ssa.OpWasmLoweredAddr:
+ if v.Aux == nil { // address of off(SP), no symbol
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(wasm.AI64Add)
+ break
+ }
+ p := s.Prog(wasm.AGet)
+ p.From.Type = obj.TYPE_ADDR
+ switch v.Aux.(type) {
+ case *obj.LSym:
+ gc.AddAux(&p.From, v)
+ case *gc.Node:
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ default:
+ panic("wasm: bad LoweredAddr")
+ }
+
+ case ssa.OpWasmLoweredConvert:
+ getValue64(s, v.Args[0])
+
+ case ssa.OpWasmSelect:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ getValue32(s, v.Args[2])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64AddConst:
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64Const:
+ i64Const(s, v.AuxInt)
+
+ case ssa.OpWasmF32Const:
+ f32Const(s, v.AuxFloat())
+
+ case ssa.OpWasmF64Const:
+ f64Const(s, v.AuxFloat())
+
+ case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
+ getValue32(s, v.Args[0])
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpWasmI64Eqz:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
+ ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
+ ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI32Rotl:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ s.Prog(wasm.AI32Rotl)
+ s.Prog(wasm.AI64ExtendI32U)
+
+ case ssa.OpWasmI64DivS:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ if v.Type.Size() == 8 {
+ // Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmDiv}
+ break
+ }
+ s.Prog(wasm.AI64DivS)
+
+ case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
+ getValue64(s, v.Args[0])
+ if objabi.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32S {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncS}
+ }
+
+ case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
+ getValue64(s, v.Args[0])
+ if objabi.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32U {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: gc.WasmTruncU}
+ }
+
+ case ssa.OpWasmF32DemoteF64:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF64PromoteF32:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
+ ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
+ ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
+ ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
+ ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
+ ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpLoadReg:
+ p := s.Prog(loadOp(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+
+ case ssa.OpCopy:
+ getValue64(s, v.Args[0])
+
+ default:
+ v.Fatalf("unexpected op: %s", v.Op)
+
+ }
+}
+
+func isCmp(v *ssa.Value) bool {
+ switch v.Op {
+ case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ return true
+ default:
+ return false
+ }
+}
+
+func getValue32(s *gc.SSAGenState, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, false)
+ if !isCmp(v) {
+ s.Prog(wasm.AI32WrapI64)
+ }
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg != wasm.REG_SP {
+ s.Prog(wasm.AI32WrapI64)
+ }
+}
+
+func getValue64(s *gc.SSAGenState, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, true)
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg == wasm.REG_SP {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+}
+
+func i32Const(s *gc.SSAGenState, val int32) {
+ p := s.Prog(wasm.AI32Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
+}
+
+func i64Const(s *gc.SSAGenState, val int64) {
+ p := s.Prog(wasm.AI64Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
+}
+
+func f32Const(s *gc.SSAGenState, val float64) {
+ p := s.Prog(wasm.AF32Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func f64Const(s *gc.SSAGenState, val float64) {
+ p := s.Prog(wasm.AF64Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func getReg(s *gc.SSAGenState, reg int16) {
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func setReg(s *gc.SSAGenState, reg int16) {
+ p := s.Prog(wasm.ASet)
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func loadOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Load
+ case 8:
+ return wasm.AF64Load
+ default:
+ panic("bad load type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return wasm.AI64Load8S
+ }
+ return wasm.AI64Load8U
+ case 2:
+ if t.IsSigned() {
+ return wasm.AI64Load16S
+ }
+ return wasm.AI64Load16U
+ case 4:
+ if t.IsSigned() {
+ return wasm.AI64Load32S
+ }
+ return wasm.AI64Load32U
+ case 8:
+ return wasm.AI64Load
+ default:
+ panic("bad load type")
+ }
+}
+
+func storeOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Store
+ case 8:
+ return wasm.AF64Store
+ default:
+ panic("bad store type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ return wasm.AI64Store8
+ case 2:
+ return wasm.AI64Store16
+ case 4:
+ return wasm.AI64Store32
+ case 8:
+ return wasm.AI64Store
+ default:
+ panic("bad store type")
+ }
+}
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
new file mode 100644
index 0000000..e137daa
--- /dev/null
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj/x86"
+ "cmd/internal/objabi"
+ "fmt"
+ "os"
+)
+
+func Init(arch *gc.Arch) {
+ arch.LinkArch = &x86.Link386
+ arch.REGSP = x86.REGSP
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.MAXWIDTH = (1 << 32) - 1
+ switch v := objabi.GO386; v {
+ case "sse2":
+ case "softfloat":
+ arch.SoftFloat = true
+ case "387":
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
+ gc.Exit(1)
+ default:
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
+ gc.Exit(1)
+
+ }
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.Ginsnopdefer = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+}
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
new file mode 100644
index 0000000..a33ddc8
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -0,0 +1,48 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if *ax == 0 {
+ p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *ax = 1
+ }
+
+ if cnt <= int64(4*gc.Widthreg) {
+ for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
+ p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+ }
+ } else if cnt <= int64(128*gc.Widthreg) {
+ p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
+ p.To.Sym = gc.Duffzero
+ } else {
+ p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
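
Worked arithmetic (not part of this patch) for the ADUFFZERO entry offset in the middle branch above, assuming each STOSL entry of runtime·duffzero on 386 occupies one byte, which is what the factor of 1 reflects.

package main

import "fmt"

func main() {
    cnt := int64(64)     // bytes to zero, between 4*Widthreg and 128*Widthreg
    widthreg := int64(4) // gc.Widthreg on 386
    // Jumping 112 bytes into duffzero leaves the last 16 STOSL entries,
    // which store 16 words = 64 bytes of zeros.
    fmt.Println(1 * (128 - cnt/widthreg)) // 112
}
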
+
+func ginsnop(pp *gc.Progs) *obj.Prog {
+ // See comment in ../amd64/ggen.go.
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
new file mode 100644
index 0000000..fbf76d0
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -0,0 +1,960 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "fmt"
+ "math"
+
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.Op386MOVLconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ default:
+ panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
+ switch v.Op {
+ case ssa.Op386ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+
+ // 2-address opcode arithmetic
+ case ssa.Op386SUBL,
+ ssa.Op386MULL,
+ ssa.Op386ANDL,
+ ssa.Op386ORL,
+ ssa.Op386XORL,
+ ssa.Op386SHLL,
+ ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
+ ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
+ ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
+ ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
+ ssa.Op386PXOR,
+ ssa.Op386ADCL,
+ ssa.Op386SBBL:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
+
+ case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
+ }
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
+
+ case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ r := v.Reg0()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386DIVL, ssa.Op386DIVW,
+ ssa.Op386DIVLU, ssa.Op386DIVWU,
+ ssa.Op386MODL, ssa.Op386MODW,
+ ssa.Op386MODLU, ssa.Op386MODWU:
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and AX is the only output
+ x := v.Args[1].Reg()
+
+ // The CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
+ v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
+
+ if ssa.DivisionNeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ c = s.Prog(x86.ACMPL)
+ j = s.Prog(x86.AJEQ)
+
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ c = s.Prog(x86.ACMPW)
+ j = s.Prog(x86.AJEQ)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+ }
+ // sign extend the dividend
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ s.Prog(x86.ACDQ)
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ s.Prog(x86.ACWD)
+ }
+ }
+
+ // for unsigned ints, we zero extend the dividend by setting DX = 0
+ // (signed ints were sign extended above)
+ if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
+ v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
+ c := s.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
+ // n * -1 = -n
+ n = s.Prog(x86.ANEGL)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_AX
+ } else {
+ // n % -1 == 0
+ n = s.Prog(x86.AXORL)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = x86.REG_DX
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_DX
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+
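
A worked example (not part of this patch) of the case the CMPL/JEQ check above guards: a raw IDIV faults on MinInt32 / -1, while Go defines the quotient as the dividend itself (negation wraps) and the remainder as 0, which is exactly what the NEGL / XORL fix-up paths produce.

package main

import (
    "fmt"
    "math"
)

func main() {
    x := int32(math.MinInt32)
    fmt.Println(x / -1) // -2147483648: -x overflows back to MinInt32
    fmt.Println(x % -1) // 0
}
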
+ case ssa.Op386HMULL, ssa.Op386HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.Op386MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386MULLQU:
+ // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386AVGLU:
+ // compute (x+y)/2 unsigned.
+ // Do a 32-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 31st bit.
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(x86.AADDL)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
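
A quick check (not part of this patch) of the identity the ADDL/RCRL pair relies on: the carry flag holds bit 32 of x+y, and rotating it back in through the top bit yields the exact unsigned average without losing the overflow.

package main

import "fmt"

func main() {
    x, y := uint32(0xFFFFFFFF), uint32(3)
    sum := uint64(x) + uint64(y) // 33-bit sum; bit 32 is what ADDL leaves in the carry
    avg := uint32(sum >> 1)      // RCRL $1 shifts that carry back into bit 31
    fmt.Println(avg)             // 2147483649, i.e. (x+y)/2 computed without overflow
}
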
+ case ssa.Op386ADDLconst:
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ if v.AuxInt == 1 {
+ p := s.Prog(x86.AINCL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ if v.AuxInt == -1 {
+ p := s.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386MULLconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()})
+
+ case ssa.Op386SUBLconst,
+ ssa.Op386ADCLconst,
+ ssa.Op386SBBLconst,
+ ssa.Op386ANDLconst,
+ ssa.Op386ORLconst,
+ ssa.Op386XORLconst,
+ ssa.Op386SHLLconst,
+ ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst,
+ ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst,
+ ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(x86.ALEAL)
+ switch v.Op {
+ case ssa.Op386LEAL1:
+ p.From.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386LEAL2:
+ p.From.Scale = 2
+ case ssa.Op386LEAL4:
+ p.From.Scale = 4
+ case ssa.Op386LEAL8:
+ p.From.Scale = 8
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386LEAL:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
+ ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
+ // Go assembler has swapped operands for UCOMISx relative to CMP,
+ // must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.Op386CMPLload, ssa.Op386CMPWload, ssa.Op386CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.From, v, sc.Off())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val()
+ case ssa.Op386MOVLconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ f := math.Float64frombits(uint64(v.AuxInt))
+ if v.Op == ssa.Op386MOVSDconst1 {
+ p.From.Sym = gc.Ctxt.Float64Sym(f)
+ } else {
+ p.From.Sym = gc.Ctxt.Float32Sym(float32(f))
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
+ ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Scale = 1
+ case ssa.Op386MOVSDloadidx8:
+ p.From.Scale = 8
+ case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
+ p.From.Scale = 4
+ case ssa.Op386MOVWloadidx2:
+ p.From.Scale = 2
+ }
+ p.From.Reg = r
+ p.From.Index = i
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
+ ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Index = v.Args[2].Reg()
+ p.From.Scale = 4
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload,
+ ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload,
+ ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload,
+ ssa.Op386MULSDload, ssa.Op386MULSSload, ssa.Op386DIVSSload, ssa.Op386DIVSDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ gc.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ if v.Reg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore,
+ ssa.Op386ADDLmodify, ssa.Op386SUBLmodify, ssa.Op386ANDLmodify, ssa.Op386ORLmodify, ssa.Op386XORLmodify:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ case ssa.Op386ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off()
+ val := sc.Val()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
+ ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.To.Scale = 1
+ case ssa.Op386MOVSDstoreidx8:
+ p.To.Scale = 8
+ case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ p.To.Scale = 4
+ case ssa.Op386MOVWstoreidx2:
+ p.To.Scale = 2
+ }
+ p.To.Reg = r
+ p.To.Index = i
+ gc.AddAux(&p.To, v)
+ case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.Op386ADDLconstmodifyidx4:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Scale = 4
+ p.To.Index = v.Args[1].Reg()
+ gc.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1,
+ ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ switch v.Op {
+ case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1:
+ p.To.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386MOVWstoreconstidx2:
+ p.To.Scale = 2
+ case ssa.Op386MOVLstoreconstidx4,
+ ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p.To.Scale = 4
+ }
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Index = i
+ gc.AddAux2(&p.To, v, sc.Off())
+ case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
+ ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
+ ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
+ ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.Op386DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.Op386DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ gc.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ gc.AddrAuto(&p.To, v)
+ case ssa.Op386LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ gc.CheckLoweredGetClosurePtr(v)
+ case ssa.Op386LoweredGetG:
+ r := v.Reg()
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(gc.Ctxt) {
+ // MOVL (TLS), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVL TLS, r
+ // MOVL (r)(TLS*1), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVL)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+
+ case ssa.Op386LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -4 // PC is stored 4 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
+ case ssa.Op386LoweredPanicBoundsA, ssa.Op386LoweredPanicBoundsB, ssa.Op386LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+
+ case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+
+ case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
+ s.Call(v)
+ case ssa.Op386NEGL,
+ ssa.Op386BSWAPL,
+ ssa.Op386NOTL:
+ r := v.Reg()
+ if r != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386BSFL, ssa.Op386BSFW,
+ ssa.Op386BSRL, ssa.Op386BSRW,
+ ssa.Op386SQRTSD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386SETEQ, ssa.Op386SETNE,
+ ssa.Op386SETL, ssa.Op386SETLE,
+ ssa.Op386SETG, ssa.Op386SETGE,
+ ssa.Op386SETGF, ssa.Op386SETGEF,
+ ssa.Op386SETB, ssa.Op386SETBE,
+ ssa.Op386SETORD, ssa.Op386SETNAN,
+ ssa.Op386SETA, ssa.Op386SETAE,
+ ssa.Op386SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386SETNEF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386SETEQF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.Op386REPSTOSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSL)
+ case ssa.Op386REPMOVSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSL)
+ case ssa.Op386LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ gc.Warnl(v.Pos, "generated nil check")
+ }
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ gc.AddAux(&p.To, v)
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.Block386EQ: {x86.AJEQ, x86.AJNE},
+ ssa.Block386NE: {x86.AJNE, x86.AJEQ},
+ ssa.Block386LT: {x86.AJLT, x86.AJGE},
+ ssa.Block386GE: {x86.AJGE, x86.AJLT},
+ ssa.Block386LE: {x86.AJLE, x86.AJGT},
+ ssa.Block386GT: {x86.AJGT, x86.AJLE},
+ ssa.Block386OS: {x86.AJOS, x86.AJOC},
+ ssa.Block386OC: {x86.AJOC, x86.AJOS},
+ ssa.Block386ULT: {x86.AJCS, x86.AJCC},
+ ssa.Block386UGE: {x86.AJCC, x86.AJCS},
+ ssa.Block386UGT: {x86.AJHI, x86.AJLS},
+ ssa.Block386ULE: {x86.AJLS, x86.AJHI},
+ ssa.Block386ORD: {x86.AJPC, x86.AJPS},
+ ssa.Block386NAN: {x86.AJPS, x86.AJPC},
+}
+
+var eqfJumps = [2][2]gc.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]gc.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in AX:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRetJmp:
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = b.Aux.(*obj.LSym)
+
+ case ssa.Block386EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.Block386NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.Block386EQ, ssa.Block386NE,
+ ssa.Block386LT, ssa.Block386GE,
+ ssa.Block386LE, ssa.Block386GT,
+ ssa.Block386OS, ssa.Block386OC,
+ ssa.Block386ULT, ssa.Block386UGT,
+ ssa.Block386ULE, ssa.Block386UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}